init
src/AddressAllocator.zig (new file, 424 lines)
@@ -0,0 +1,424 @@
const std = @import("std");
const mem = std.mem;
const sort = std.sort;
const testing = std.testing;

const assert = std.debug.assert;

const Range = @import("Range.zig");
const log = std.log.scoped(.address_allocator);

const AddressAllocator = @This();

/// The **sorted** list of `Range`s that are blocked.
ranges: std.ArrayListUnmanaged(Range) = .empty,

pub const empty = AddressAllocator{};

pub fn deinit(address_allocator: *AddressAllocator, gpa: mem.Allocator) void {
    address_allocator.ranges.deinit(gpa);
}

/// Block a range so that it is never handed out by `allocate`. This function always succeeds if
/// there is enough memory available.
pub fn block(
    address_allocator: *AddressAllocator,
    gpa: mem.Allocator,
    range: Range,
    alignment: u64,
) !void {
    assert(address_allocator.isSorted());
    defer assert(address_allocator.isSorted());

    const aligned_range = if (alignment != 0) range.alignTo(alignment) else range;
    assert(aligned_range.contains(range));
    if (aligned_range.size() == 0) return;

    // Find the correct sorted position to insert the new range.
    const insert_idx = sort.lowerBound(
        Range,
        address_allocator.ranges.items,
        aligned_range,
        Range.compare,
    );
    log.debug(
        "block: range: {}, alignment: {}, aligned_range: {}, insert_idx: {}",
        .{ range, alignment, aligned_range, insert_idx },
    );
    // If the new range is the greatest one OR if the entry at `insert_idx` is greater than the
    // new range, we can just insert.
    if (insert_idx == address_allocator.ranges.items.len or
        address_allocator.ranges.items[insert_idx].compare(aligned_range) == .gt)
    {
        log.debug("block: New range inserted", .{});
        return address_allocator.ranges.insert(gpa, insert_idx, aligned_range);
    }
    errdefer comptime unreachable;
    assert(address_allocator.ranges.items.len > 0);

    // Now `insert_idx` points to the first entry that touches `aligned_range`.
    assert(address_allocator.ranges.items[insert_idx].touches(aligned_range));
    if (insert_idx > 1 and address_allocator.ranges.items.len > 1) {
        assert(!address_allocator.ranges.items[insert_idx - 1].touches(aligned_range));
    }
    log.debug("block: `aligned_range` touches at least one existing range.", .{});

    // NOTE: We merge entries that touch each other to speed up future traversals.
    // There are a few cases to handle when merging:
    // 1. `aligned_range` is contained by the existing range. Then we have nothing to do and can
    //    return early.
    // 2. `aligned_range` contains the existing range. Then we have to overwrite `start` and `end`.
    // 3. The existing range is before `aligned_range`. Set `existing.end` to `aligned_range.end`.
    // 4. The existing range is after `aligned_range`. Set `existing.start` to `aligned_range.start`.
    // After we have done this to the first range that touches, we will loop over the other ones
    // that touch and just have to apply rule 4 repeatedly.
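    // Worked example (illustrative numbers, mirroring the tests below): blocking [50,250) into
    // the list { [0,100), [200,300) } finds [0,100) first and applies rule 3 (its end becomes
    // 250); the neighbor loop further down then absorbs [200,300), leaving just { [0,300) }.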
    const first = &address_allocator.ranges.items[insert_idx];
    if (first.contains(aligned_range)) {
        log.debug("block: Existing range at index {} contains new range. No-op", .{insert_idx});
        return;
    } else if (aligned_range.contains(first.*)) {
        log.debug(
            "block: New range contains existing range at index {}: {} -> {}",
            .{ insert_idx, first, aligned_range },
        );
        first.* = aligned_range;
    } else if (aligned_range.start <= first.end and aligned_range.end >= first.end) {
        assert(aligned_range.start > first.start);
        log.debug(
            "block: Adjusting range end at index {}: {} -> {}",
            .{ insert_idx, first.end, aligned_range.end },
        );
        first.end = aligned_range.end;
    } else if (aligned_range.end >= first.start and aligned_range.start <= first.start) {
        assert(aligned_range.end < first.end);
        log.debug(
            "block: Adjusting range start at index {}: {} -> {}",
            .{ insert_idx, first.start, aligned_range.start },
        );
        first.start = aligned_range.start;
    } else {
        unreachable;
    }

    // If the (possibly grown) range is the last entry, there is no following neighbor left that
    // could touch it, so we are done.
    if (insert_idx >= address_allocator.ranges.items.len - 1) return;

    // Absorb all following neighbors that touch the new range. NOTE: We must re-check the bounds
    // and re-read the neighbor after every removal; a pointer taken once before the loop would
    // read a stale slot after `orderedRemove` shrinks the list.
    var i: u64 = 0;
    while (insert_idx + 1 < address_allocator.ranges.items.len) {
        const neighbor = address_allocator.ranges.items[insert_idx + 1];
        if (!neighbor.touches(aligned_range)) break;
        assert(aligned_range.end >= neighbor.start);
        assert(aligned_range.start <= neighbor.start);

        if (neighbor.end > first.end) {
            log.debug(
                "block: Merging neighbor range at index {}: {} -> {}.",
                .{ insert_idx + 1, first.end, neighbor.end },
            );
            first.end = neighbor.end;
        }
        const removed = address_allocator.ranges.orderedRemove(insert_idx + 1);
        log.debug("block: Removed merged range: {}", .{removed});
        i += 1;
    }
    log.debug("block: Removed {} ranges.", .{i});
}

/// Allocate and block a `Range` of size `size` which will lie inside the given `valid_range`. If no
/// allocation of the given size is possible, return `null`.
pub fn allocate(
    address_allocator: *AddressAllocator,
    gpa: mem.Allocator,
    size: u64,
    valid_range: Range,
) !?Range {
    log.debug("allocate: Allocating size {} in range {}", .{ size, valid_range });
    if (valid_range.size() < size) return null;
    if (size == 0) return null;
    const size_i: i64 = @intCast(size);

    // OPTIM: Use binary search to find the start of the valid range inside the reserved ranges.

    // `candidate_start` tracks the beginning of the current free region being examined.
    var candidate_start = valid_range.start;
    for (address_allocator.ranges.items) |reserved| {
        if (candidate_start >= valid_range.end) {
            log.debug("allocate: Searched past the valid range.", .{});
            break;
        }

        // The potential allocation gap is before the current reserved block.
        if (candidate_start < reserved.start) {
            // Determine the actual available portion of the gap within `valid_range`.
            const gap_end = @min(reserved.start, valid_range.end);
            if (gap_end >= candidate_start + size_i) {
                const new_range = Range{
                    .start = candidate_start,
                    .end = candidate_start + size_i,
                };
                try address_allocator.block(gpa, new_range, 0);
                assert(valid_range.contains(new_range));
                log.debug("allocate: Found free gap: {}", .{new_range});
                return new_range;
            }
        }

        // The gap was not large enough. Move the candidate start past the current reserved block
        // for the next iteration.
        candidate_start = @max(candidate_start, reserved.end);
    }

    // Check the remaining space at the end of the search range.
    if (valid_range.end >= candidate_start + size_i) {
        const new_range = Range{
            .start = candidate_start,
            .end = candidate_start + size_i,
        };
        try address_allocator.block(gpa, new_range, 0);
        assert(valid_range.contains(new_range));
        log.debug("allocate: Found free gap at end: {}", .{new_range});
        return new_range;
    }

    log.debug("allocate: No suitable gap found.", .{});
    return null;
}

fn isSorted(address_allocator: *const AddressAllocator) bool {
    return sort.isSorted(Range, address_allocator.ranges.items, {}, isSortedInner);
}
fn isSortedInner(_: void, lhs: Range, rhs: Range) bool {
    return switch (lhs.compare(rhs)) {
        .lt => true,
        .gt => false,
        .eq => unreachable,
    };
}

test "block basic" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
|
||||
try testing.expectEqual(2, aa.ranges.items.len);
|
||||
}
|
||||
|
||||
test "block in hole" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 400, .end = 500 }, 0);
|
||||
try testing.expectEqual(2, aa.ranges.items.len);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(Range{ .start = 400, .end = 500 }, aa.ranges.items[1]);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
|
||||
try testing.expectEqual(3, aa.ranges.items.len);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
|
||||
try testing.expectEqual(Range{ .start = 400, .end = 500 }, aa.ranges.items[2]);
|
||||
}
|
||||
|
||||
test "block touch with previous" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 100, .end = 200 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 200 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 100, .end = 300 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 300, .end = 400 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 400 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
}
|
||||
|
||||
test "block touch with following" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 100, .end = 200 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 100, .end = 300 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 200 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = -100, .end = 0 }, 0);
|
||||
try testing.expectEqual(Range{ .start = -100, .end = 300 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
}
|
||||
|
||||
test "block overlap with previous and following" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
|
||||
try testing.expectEqual(2, aa.ranges.items.len);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 50, .end = 250 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
}
|
||||
|
||||
test "block contained by existing" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 100, .end = 300 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 200, .end = 250 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 100, .end = 300 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
}
|
||||
|
||||
test "block contains existing" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 50, .end = 100 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 200 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 200 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
}
|
||||
|
||||
test "block overlaps multiple" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 150, .end = 200 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 250, .end = 300 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 350, .end = 400 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 450, .end = 500 }, 0);
|
||||
try testing.expectEqual(5, aa.ranges.items.len);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 50, .end = 475 }, 0);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 500 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
}
|
||||
|
||||
test "allocate in empty allocator" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
const search_range = Range{ .start = 0, .end = 1000 };
|
||||
const allocated = try aa.allocate(testing.allocator, 100, search_range);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, allocated);
|
||||
}
|
||||
|
||||
test "allocate with no space" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
const range = Range{ .start = 0, .end = 1000 };
|
||||
try aa.block(testing.allocator, range, 0);
|
||||
const allocated = try aa.allocate(testing.allocator, 100, range);
|
||||
try testing.expect(allocated == null);
|
||||
}
|
||||
|
||||
test "allocate in a gap" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
|
||||
|
||||
const search_range = Range{ .start = 0, .end = 1000 };
|
||||
const allocated = try aa.allocate(testing.allocator, 50, search_range);
|
||||
try testing.expectEqual(Range{ .start = 100, .end = 150 }, allocated);
|
||||
try testing.expectEqual(2, aa.ranges.items.len);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 150 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
|
||||
}
|
||||
|
||||
test "allocate at the end" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
|
||||
const search_range = Range{ .start = 0, .end = 1000 };
|
||||
const allocated = try aa.allocate(testing.allocator, 200, search_range);
|
||||
try testing.expectEqual(Range{ .start = 100, .end = 300 }, allocated);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
|
||||
}
|
||||
|
||||
test "allocate within specific search range" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 400, .end = 500 }, 0);
|
||||
|
||||
// Search range starts after first block and has a gap
|
||||
const search_range = Range{ .start = 200, .end = 400 };
|
||||
const allocated = try aa.allocate(testing.allocator, 100, search_range);
|
||||
try testing.expectEqual(Range{ .start = 200, .end = 300 }, allocated);
|
||||
try testing.expectEqual(3, aa.ranges.items.len);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
|
||||
try testing.expectEqual(Range{ .start = 400, .end = 500 }, aa.ranges.items[2]);
|
||||
try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
|
||||
}
|
||||
|
||||
test "allocate exact gap size" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
|
||||
|
||||
const search_range = Range{ .start = 0, .end = 1000 };
|
||||
const allocated = try aa.allocate(testing.allocator, 100, search_range);
|
||||
try testing.expectEqual(Range{ .start = 100, .end = 200 }, allocated);
|
||||
try testing.expectEqual(1, aa.ranges.items.len);
|
||||
try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
|
||||
}
|
||||
|
||||
test "allocate fails when too large" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
|
||||
try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
|
||||
|
||||
const search_range = Range{ .start = 0, .end = 400 };
|
||||
const allocated = try aa.allocate(testing.allocator, 101, search_range);
|
||||
try std.testing.expect(allocated == null);
|
||||
}
|
||||
|
||||
test "allocate with zero size" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
const search_range = Range{ .start = 0, .end = 1000 };
|
||||
const allocated = try aa.allocate(testing.allocator, 0, search_range);
|
||||
try std.testing.expect(allocated == null);
|
||||
}
|
||||
|
||||
test "allocate with size bigger than range" {
|
||||
var aa = AddressAllocator{};
|
||||
defer aa.deinit(testing.allocator);
|
||||
|
||||
const search_range = Range{ .start = 0, .end = 100 };
|
||||
const allocated = try aa.allocate(testing.allocator, 1000, search_range);
|
||||
try std.testing.expect(allocated == null);
|
||||
}
|
||||
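
// A minimal usage sketch of the public API (illustrative values only): reserve a region, then
// let the allocator find the next free gap and merge it into the blocked list.
test "usage sketch: block then allocate" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 64 }, 0);
    const got = try aa.allocate(testing.allocator, 16, .{ .start = 0, .end = 4096 });
    try testing.expectEqual(Range{ .start = 64, .end = 80 }, got.?);
    // The fresh allocation touches the blocked range, so both were merged into one entry.
    try testing.expectEqual(1, aa.ranges.items.len);
}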
src/PatchLocationIterator.zig (new file, 435 lines)
@@ -0,0 +1,435 @@
//! Iterates through all possible valid address ranges for a `jmp rel32` instruction based on a
//! 4-byte pattern of "free" and "used" bytes.
//!
//! This is the core utility for implementing E9Patch-style instruction punning (B2) and padded
//! jumps (T1).
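//!
//! A worked sketch (numbers taken from the tests below): for the little-endian pattern
//! `{ .free, .used = 0xe8, .free, .used = 0x79 }` the encodable offsets are exactly those of
//! the form `0x79XXe8YY`, so the iterator yields `Range{ .start = 0x7900e800, .end = 0x7900e8ff }`
//! (shifted by `offset`) and then one such range per value of the inner free byte `XX`,
//! 256 ranges in total.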
const std = @import("std");
const testing = std.testing;
const assert = std.debug.assert;

const log = std.log.scoped(.patch_location_iterator);

const Range = @import("Range.zig");

/// Represents a single byte in the 4-byte `rel32` offset pattern.
pub const PatchByte = union(enum) {
    /// This byte can be any value (0x00-0xFF).
    free: void,
    /// This byte is constrained to a specific value.
    used: u8,

    pub fn format(self: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
        switch (self) {
            .free => try writer.print("free", .{}),
            .used => |val| try writer.print("used({x})", .{val}),
        }
    }
};

const patch_size = 4;
const PatchInt = std.meta.Int(.signed, patch_size * 8);
const PatchLocationIterator = @This();

/// The base address (e.g., RIP of the *next* instruction) that the 32-bit relative offset is
/// calculated from.
offset: i64,
/// The 4-byte little-endian pattern of `used` and `free` bytes that constrain the `rel32` offset.
patch_bytes: [patch_size]PatchByte,
/// Internal state: the byte-level representation of the *start* of the current `rel32` offset being
/// iterated.
start: [patch_size]u8,
/// Internal state: the byte-level representation of the *end* of the current `rel32` offset being
/// iterated.
end: [patch_size]u8,
/// Internal state: flag to handle the first call to `next()` uniquely.
first: bool,
/// Internal state: optimization cache for the number of contiguous `.free` bytes at the start of
/// `patch_bytes`, i.e. the low-order bytes of the little-endian offset.
trailing_free_count: u8,

/// Initializes the iterator.
/// - `patch_bytes`: The 4-byte pattern of the `rel32` offset, in little-endian order.
/// - `offset`: The address of the *next* instruction (i.e., `RIP` after the 5-byte `jmp`).
///   All returned ranges will be relative to this offset.
pub fn init(patch_bytes: [patch_size]PatchByte, offset: u64) PatchLocationIterator {
    // Find the number of contiguous free low-order bytes at the start of the pattern.
    var trailing_free: u8 = 0;
    for (0..patch_bytes.len) |i| {
        if (patch_bytes[i] == .free) {
            trailing_free += 1;
        } else {
            break;
        }
    }

    var start = std.mem.zeroes([patch_size]u8);
    var end = std.mem.zeroes([patch_size]u8);
    for (patch_bytes, 0..) |byte, i| {
        switch (byte) {
            .free => {
                start[i] = 0;
                end[i] = if (i < trailing_free) 0xff else 0;
            },
            .used => |val| {
                start[i] = val;
                end[i] = val;
            },
        }
    }

    const out = PatchLocationIterator{
        .offset = @intCast(offset),
        .patch_bytes = patch_bytes,
        .trailing_free_count = trailing_free,
        .start = start,
        .end = end,
        .first = true,
    };
    log.debug("init: {f}", .{out});
    return out;
}

/// Returns the next valid `Range` of target addresses, or `null` if the iteration is complete.
pub fn next(self: *PatchLocationIterator) ?Range {
    defer self.first = false;

    // If the most significant byte is predetermined and has its sign bit set, the offset would
    // always be negative, so there is nothing to iterate.
    const last_byte = self.patch_bytes[patch_size - 1];
    if (last_byte == .used and last_byte.used & 0x80 != 0) {
        log.info(
            "next: Search aborted, pattern has predetermined negative offset (last_byte=0x{x})",
            .{last_byte.used},
        );
        return null;
    }

    // If all bytes are free we can just return the maximum range.
    if (self.trailing_free_count == patch_size) {
        if (self.first) {
            const range = Range{ .start = self.offset, .end = self.offset + std.math.maxInt(i32) };
            log.debug("next: All bytes free, returning full positive range: {}", .{range});
            return range;
        } else {
            log.info("next: All bytes free, iteration finished.", .{});
            return null;
        }
    }

    if (self.first) {
        const range = Range{
            .start = std.mem.readInt(PatchInt, self.start[0..], .little) + self.offset,
            .end = std.mem.readInt(PatchInt, self.end[0..], .little) + self.offset,
        };
        log.debug("next: First call, returning initial range: {}", .{range});
        return range;
    }

    var overflow: u1 = 1;
    for (self.patch_bytes, 0..) |byte, i| {
        if (i < self.trailing_free_count or byte == .used) {
            continue;
        }
        assert(byte == .free);
        assert(self.start[i] == self.end[i]);
        defer assert(self.start[i] == self.end[i]);

        if (overflow == 1) {
            const max: u8 = if (i < patch_size - 1) std.math.maxInt(u8) else std.math.maxInt(i8);
            if (self.start[i] == max) {
                self.start[i] = 0;
                self.end[i] = 0;
            } else {
                self.start[i] += 1;
                self.end[i] += 1;
                overflow = 0;
            }
        }
    }
    if (overflow == 1) {
        log.info("next: Iteration finished, no more ranges.", .{});
        return null;
    }

    const start = std.mem.readInt(PatchInt, self.start[0..], .little);
    const end = std.mem.readInt(PatchInt, self.end[0..], .little);
    assert(end >= start);
    const range = Range{
        .start = start + self.offset,
        .end = end + self.offset,
    };
    log.debug("next: new range: {}", .{range});
    return range;
}

pub fn format(self: PatchLocationIterator, writer: *std.Io.Writer) std.Io.Writer.Error!void {
    try writer.print(".{{ ", .{});
    try writer.print(".offset = 0x{x}, ", .{self.offset});
    try writer.print(
        ".patch_bytes = .{{ {f}, {f}, {f}, {f} }}, ",
        .{ self.patch_bytes[0], self.patch_bytes[1], self.patch_bytes[2], self.patch_bytes[3] },
    );
    try writer.print(
        ".start = 0x{x}, .end = 0x{x}, .first = {}, .trailing_free_count = {}",
        .{ self.start, self.end, self.first, self.trailing_free_count },
    );
}

test "free bytes" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
};
|
||||
var it = PatchLocationIterator.init(pattern, 0);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x00000000, .end = 0x7fffffff },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(null, it.next());
|
||||
}
|
||||
|
||||
test "predetermined negative" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .used = 0xe9 },
|
||||
};
|
||||
var it = PatchLocationIterator.init(pattern, 0);
|
||||
try testing.expectEqual(null, it.next());
|
||||
it = PatchLocationIterator.init(pattern, 0x12345678);
|
||||
try testing.expectEqual(null, it.next());
|
||||
}
|
||||
|
||||
test "trailing free bytes" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .used = 0x79 },
|
||||
};
|
||||
var it = PatchLocationIterator.init(pattern, 0);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x79000000, .end = 0x79ffffff },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(null, it.next());
|
||||
}
|
||||
|
||||
test "inner and trailing free bytes" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .free = {} },
|
||||
.{ .used = 0xe8 },
|
||||
.{ .free = {} },
|
||||
.{ .used = 0x79 },
|
||||
};
|
||||
var it = PatchLocationIterator.init(pattern, 0);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x7900e800, .end = 0x7900e8ff },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x7901e800, .end = 0x7901e8ff },
|
||||
it.next().?,
|
||||
);
|
||||
|
||||
// Skip to the last range
|
||||
var r_last: ?Range = null;
|
||||
var count: u32 = 2; // We already consumed two
|
||||
while (it.next()) |r| {
|
||||
r_last = r;
|
||||
count += 1;
|
||||
}
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x79ffe800, .end = 0x79ffe8ff },
|
||||
r_last,
|
||||
);
|
||||
try testing.expectEqual(256, count);
|
||||
}
|
||||
|
||||
test "no free bytes" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .used = 0xe9 },
|
||||
.{ .used = 0x00 },
|
||||
.{ .used = 0x00 },
|
||||
.{ .used = 0x78 },
|
||||
};
|
||||
var it = PatchLocationIterator.init(pattern, 0);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x780000e9, .end = 0x780000e9 },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(null, it.next());
|
||||
}
|
||||
|
||||
test "inner and leading free bytes" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .used = 0xe9 },
|
||||
.{ .free = {} },
|
||||
.{ .used = 0xe8 },
|
||||
.{ .free = {} },
|
||||
};
|
||||
var it = PatchLocationIterator.init(pattern, 0);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x00e800e9, .end = 0x00e800e9 },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x00e801e9, .end = 0x00e801e9 },
|
||||
it.next().?,
|
||||
);
|
||||
|
||||
// Skip to the last range
|
||||
var r_last: ?Range = null;
|
||||
var count: u32 = 2; // We already consumed two
|
||||
while (it.next()) |r| {
|
||||
r_last = r;
|
||||
count += 1;
|
||||
}
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x7fe8ffe9, .end = 0x7fe8ffe9 },
|
||||
r_last,
|
||||
);
|
||||
try testing.expectEqual(256 * 128, count);
|
||||
}
|
||||
|
||||
test "only inner" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .used = 0xe9 },
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .used = 0x78 },
|
||||
};
|
||||
var it = PatchLocationIterator.init(pattern, 0);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x780000e9, .end = 0x780000e9 },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x780001e9, .end = 0x780001e9 },
|
||||
it.next().?,
|
||||
);
|
||||
|
||||
// Skip to the last range
|
||||
var r_last: ?Range = null;
|
||||
var count: u32 = 2; // We already consumed two
|
||||
while (it.next()) |r| {
|
||||
r_last = r;
|
||||
count += 1;
|
||||
}
|
||||
try testing.expectEqual(
|
||||
Range{ .start = 0x78ffffe9, .end = 0x78ffffe9 },
|
||||
r_last,
|
||||
);
|
||||
try testing.expectEqual(256 * 256, count);
|
||||
}
|
||||
|
||||
test "trailing free bytes offset" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .used = 0x79 },
|
||||
};
|
||||
const offset = 0x12345678;
|
||||
var it = PatchLocationIterator.init(pattern, offset);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = offset + 0x79000000, .end = offset + 0x79ffffff },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(null, it.next());
|
||||
}
|
||||
|
||||
test "trailing and leading offset" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .free = {} },
|
||||
.{ .used = 0xe9 },
|
||||
.{ .used = 0xe8 },
|
||||
.{ .free = {} },
|
||||
};
|
||||
const offset = 0x12345678;
|
||||
var it = PatchLocationIterator.init(pattern, offset);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = offset + 0x00e8e900, .end = offset + 0x00e8e9ff },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(
|
||||
Range{ .start = offset + 0x01e8e900, .end = offset + 0x01e8e9ff },
|
||||
it.next().?,
|
||||
);
|
||||
|
||||
// Skip to the last range
|
||||
var r_last: ?Range = null;
|
||||
var count: u32 = 2; // We already consumed two
|
||||
while (it.next()) |r| {
|
||||
r_last = r;
|
||||
count += 1;
|
||||
}
|
||||
try testing.expectEqual(
|
||||
Range{ .start = offset + 0x7fe8e900, .end = offset + 0x7fe8e9ff },
|
||||
r_last,
|
||||
);
|
||||
try testing.expectEqual(128, count);
|
||||
}
|
||||
|
||||
test "trailing free bytes large offset" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .free = {} },
|
||||
.{ .used = 0x79 },
|
||||
};
|
||||
const offset = 0x12345678;
|
||||
var it = PatchLocationIterator.init(pattern, offset);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = offset + 0x79000000, .end = offset + 0x79ffffff },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(null, it.next());
|
||||
}
|
||||
|
||||
test "trailing and leading large offset" {
|
||||
const pattern = [_]PatchByte{
|
||||
.{ .free = {} },
|
||||
.{ .used = 0xe9 },
|
||||
.{ .used = 0xe8 },
|
||||
.{ .free = {} },
|
||||
};
|
||||
const offset = 0x12345678;
|
||||
var it = PatchLocationIterator.init(pattern, offset);
|
||||
|
||||
try testing.expectEqual(
|
||||
Range{ .start = offset + 0x00e8e900, .end = offset + 0x00e8e9ff },
|
||||
it.next().?,
|
||||
);
|
||||
try testing.expectEqual(
|
||||
Range{ .start = offset + 0x01e8e900, .end = offset + 0x01e8e9ff },
|
||||
it.next().?,
|
||||
);
|
||||
|
||||
// Skip to the last range
|
||||
var r_last: ?Range = null;
|
||||
var count: u32 = 2; // We already consumed two
|
||||
while (it.next()) |r| {
|
||||
r_last = r;
|
||||
count += 1;
|
||||
}
|
||||
try testing.expectEqual(
|
||||
Range{ .start = offset + 0x7fe8e900, .end = offset + 0x7fe8e9ff },
|
||||
r_last,
|
||||
);
|
||||
try testing.expectEqual(128, count);
|
||||
}
|
||||
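
// A minimal driving sketch (pattern and values mirror the "trailing free bytes" test): a caller
// typically loops over `next()` and tries each returned candidate range in turn.
test "usage sketch: drive the iterator" {
    const pattern = [_]PatchByte{
        .{ .free = {} },
        .{ .free = {} },
        .{ .free = {} },
        .{ .used = 0x79 },
    };
    var it = PatchLocationIterator.init(pattern, 0x1000);
    var total: u64 = 0;
    while (it.next()) |range| total += range.size();
    // A single candidate range covering all offsets of the form 0x79?????? (plus the offset).
    try testing.expectEqual(@as(u64, 0xffffff), total);
}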
src/Patcher.zig (new file, 272 lines)
@@ -0,0 +1,272 @@
const std = @import("std");
const builtin = @import("builtin");
const testing = std.testing;
const math = std.math;
const mem = std.mem;
const posix = std.posix;
const zydis = @import("zydis").zydis;
const disassembler = @import("disassembler.zig");

const log = std.log.scoped(.patcher);
const AddressAllocator = @import("AddressAllocator.zig");
const InstructionFormatter = disassembler.InstructionFormatter;
const InstructionIterator = disassembler.InstructionIterator;
const PatchLocationIterator = @import("PatchLocationIterator.zig");
const Range = @import("Range.zig");

const assert = std.debug.assert;

const page_size = 4096;
const jump_rel32: u8 = 0xe9;
const jump_rel32_size = 5;
const jump_rel8: u8 = 0xeb;
const jump_rel8_size = 2;
const max_ins_bytes = 15;
// Based on the paper 'x86-64 Instruction Usage among C/C++ Applications' by Akshintala et al.,
// the average is 4.25 bytes, so 4 is good enough.
// (https://oscarlab.github.io/papers/instrpop-systor19.pdf)
const avg_ins_bytes = 4;

// TODO: Find an invalid instruction to use.
// const invalid: u8 = 0xaa;
const int3: u8 = 0xcc;

// Prefixes for Padded Jumps (Tactic T1)
const prefix_fs: u8 = 0x64;
const prefix_gs: u8 = 0x65;
const prefix_ss: u8 = 0x36;
const prefixes = [_]u8{ prefix_fs, prefix_gs, prefix_ss };

const Patcher = @This();

gpa: mem.Allocator,
flicken: std.StringArrayHashMapUnmanaged(Flicken) = .empty,
address_allocator: AddressAllocator = .empty,
/// Tracks the base addresses of pages we have mmap'd for Flicken.
allocated_pages: std.AutoHashMapUnmanaged(u64, void) = .empty,

pub fn init(gpa: mem.Allocator) !Patcher {
    var flicken: std.StringArrayHashMapUnmanaged(Flicken) = .empty;
    try flicken.ensureTotalCapacity(gpa, 8);
    flicken.putAssumeCapacity("nop", .{ .name = "nop", .bytes = &.{} });
    return .{
        .gpa = gpa,
        .flicken = flicken,
    };
}

pub fn deinit(patcher: *Patcher) void {
    patcher.flicken.deinit(patcher.gpa);
    patcher.address_allocator.deinit(patcher.gpa);
    patcher.allocated_pages.deinit(patcher.gpa);
}

/// The flicken's name and bytes must remain valid for as long as the patcher uses them. If a
/// flicken with the same name is already registered, it gets overwritten.
/// NOTE: The name "nop" is reserved and always has the ID 0.
pub fn addFlicken(patcher: *Patcher, trampoline: Flicken) !FlickenId {
    assert(!mem.eql(u8, "nop", trampoline.name));
    try patcher.flicken.ensureUnusedCapacity(patcher.gpa, 1);
    errdefer comptime unreachable;

    const gop = patcher.flicken.getOrPutAssumeCapacity(trampoline.name);
    if (gop.found_existing) {
        log.warn("addFlicken: Overwriting existing flicken: {s}", .{trampoline.name});
    }
    gop.key_ptr.* = trampoline.name;
    gop.value_ptr.* = trampoline;
    return @enumFromInt(gop.index);
}

pub const Flicken = struct {
    name: []const u8,
    bytes: []const u8,

    pub fn size(flicken: *const Flicken) u64 {
        return flicken.bytes.len + jump_rel32_size;
    }
};

pub const FlickenId = enum(u64) { nop = 0, _ };
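
// A minimal registration sketch (illustrative name and bytes; relies on `deinit` freeing the
// flicken map as defined above).
test "usage sketch: addFlicken" {
    var test_patcher = try Patcher.init(testing.allocator);
    defer test_patcher.deinit();

    const id = try test_patcher.addFlicken(.{ .name = "probe", .bytes = &.{0x90} });
    try testing.expect(id != .nop);
    // "nop" is preregistered, so the map now holds two entries.
    try testing.expectEqual(2, test_patcher.flicken.count());
}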
pub const PatchRequest = struct {
    /// The flicken to install at the instruction.
    flicken: FlickenId,
    /// Bytes of the instruction; must point to the first byte of an instruction. Can be used to
    /// get the address of the instruction.
    bytes: []u8,

    pub fn desc(_: void, lhs: PatchRequest, rhs: PatchRequest) bool {
        return @intFromPtr(lhs.bytes.ptr) > @intFromPtr(rhs.bytes.ptr);
    }

    pub fn format(
        self: @This(),
        writer: *std.Io.Writer,
    ) std.Io.Writer.Error!void {
        try writer.print(
            ".{{ .address = 0x{x}, .bytes = 0x{x}, .flicken = {} }}",
            .{ @intFromPtr(self.bytes.ptr), self.bytes, @intFromEnum(self.flicken) },
        );
    }
};

pub fn patchRegion(patcher: *Patcher, region: []align(page_size) u8) !void {
    {
        // Block the region, so that we don't try to allocate there anymore.
        const start: i64 = @intCast(@intFromPtr(region.ptr));
        try patcher.address_allocator.block(
            patcher.gpa,
            .{ .start = start, .end = start + @as(i64, @intCast(region.len)) },
            page_size,
        );
    }

    var arena_impl = std.heap.ArenaAllocator.init(patcher.gpa);
    const arena = arena_impl.allocator();
    defer arena_impl.deinit();
    var patch_requests: std.ArrayListUnmanaged(PatchRequest) = .empty;

    {
        // Find the instructions to patch.
        var instruction_iterator = InstructionIterator.init(region);
        while (instruction_iterator.next()) |instruction| {
            const should_patch: bool =
                (instruction.instruction.attributes & zydis.ZYDIS_ATTRIB_HAS_LOCK) > 0;
            if (should_patch) {
                const start = instruction.address - @intFromPtr(region.ptr);
                const request: PatchRequest = .{
                    .bytes = region[start..][0..instruction.instruction.length],
                    .flicken = .nop,
                };
                try patch_requests.append(arena, request);
            }
        }
        log.info("patchRegion: Got {} patch requests", .{patch_requests.items.len});
    }

    // Sort patch requests in descending order by address, such that we patch from back to front.
    mem.sortUnstable(PatchRequest, patch_requests.items, {}, PatchRequest.desc);

    {
        // Check for duplicate patch requests and undefined IDs.
        var last_address: ?[*]u8 = null;
        for (patch_requests.items, 0..) |request, i| {
            if (last_address) |last| {
                if (last == request.bytes.ptr) {
                    var buffer: [256]u8 = undefined;
                    const fmt = disassembler.formatBytes(request.bytes, &buffer);
                    log.err(
                        "patchRegion: Found duplicate patch requests for instruction: {s}",
                        .{fmt},
                    );
                    log.err("patchRegion: request 1: {f}", .{patch_requests.items[i - 1]});
                    log.err("patchRegion: request 2: {f}", .{patch_requests.items[i]});
                    return error.DuplicatePatchRequest;
                }
            }
            last_address = request.bytes.ptr;

            if (@as(u64, @intFromEnum(request.flicken)) >= patcher.flicken.count()) {
                var buffer: [256]u8 = undefined;
                const fmt = disassembler.formatBytes(request.bytes, &buffer);
                log.err(
                    "patchRegion: Usage of undefined flicken in request {f} for instruction: {s}",
                    .{ request, fmt },
                );
                return error.UndefinedFlicken;
            }
        }
    }

    {
        // Apply patches.
        try posix.mprotect(region, posix.PROT.READ | posix.PROT.WRITE);
        defer posix.mprotect(region, posix.PROT.READ | posix.PROT.EXEC) catch
            @panic("patchRegion: mprotect back to R|X failed. Can't continue");

        // PERF: A set of the pages for the patches/flicken we made writable. This way we don't
        // repeatedly call `mprotect` on the same page to switch it from R|W to R|X and back.
        // At the end we `mprotect` all pages in this set back to being R|X.
        var pages_made_writable: std.AutoHashMapUnmanaged(u64, void) = .empty;
        defer pages_made_writable.deinit(patcher.gpa);
        for (patch_requests.items) |request| {
            const flicken = patcher.flicken.entries.get(@intFromEnum(request.flicken)).value;
            if (request.bytes.len < jump_rel32_size) continue; // TODO: handle instructions shorter than the 5-byte jump.

            var iter = PatchLocationIterator.init(
                .{ .free, .free, .free, .free },
                @intFromPtr(request.bytes.ptr),
            );
            while (iter.next()) |valid_range| {
                const patch_range = try patcher.address_allocator.allocate(
                    patcher.gpa,
                    flicken.size(),
                    valid_range,
                ) orelse continue;
                assert(patch_range.size() == flicken.size());

                {
                    // Map patch_range as R|W.
                    const start_page = mem.alignBackward(u64, patch_range.getStart(u64), page_size);
                    const end_page = mem.alignBackward(u64, patch_range.getEnd(u64), page_size);
                    const protection = posix.PROT.READ | posix.PROT.WRITE;
                    var page_addr = start_page;
                    while (page_addr <= end_page) : (page_addr += page_size) {
                        // If the page is already writable we don't need to do anything.
                        if (pages_made_writable.get(page_addr)) |_| continue;

                        const gop = try patcher.allocated_pages.getOrPut(patcher.gpa, page_addr);
                        if (gop.found_existing) {
                            const ptr: [*]align(page_size) u8 = @ptrFromInt(page_addr);
                            try posix.mprotect(ptr[0..page_size], protection);
                        } else {
                            const addr = try posix.mmap(
                                @ptrFromInt(page_addr),
                                page_size,
                                protection,
                                .{ .TYPE = .PRIVATE, .ANONYMOUS = true, .FIXED_NOREPLACE = true },
                                -1,
                                0,
                            );
                            assert(@as(u64, @intFromPtr(addr.ptr)) == page_addr);
                            // `gop.value_ptr.* = {};` is not needed because the value type is void.
                        }
                        try pages_made_writable.put(patcher.gpa, page_addr, {});
                    }
                }

                const flicken_addr: [*]u8 = @ptrFromInt(patch_range.getStart(u64));
                const flicken_slice = flicken_addr[0 .. flicken.bytes.len + jump_rel32_size];

                const jump_to_offset: i32 = blk: {
                    const from: i64 = @intCast(@intFromPtr(request.bytes.ptr) + jump_rel32_size);
                    const to = patch_range.start;
                    break :blk @intCast(to - from);
                };
                request.bytes[0] = jump_rel32;
                mem.writeInt(i32, request.bytes[1..5], jump_to_offset, .little);
                for (request.bytes[5..]) |*b| {
                    b.* = int3;
                }

                const jump_back_offset: i32 = blk: {
                    const from = patch_range.end;
                    const to: i64 = @intCast(@intFromPtr(request.bytes.ptr) + request.bytes.len);
                    break :blk @intCast(to - from);
                };
                @memcpy(flicken_addr, flicken.bytes);
                flicken_slice[flicken.bytes.len] = jump_rel32;
                mem.writeInt(i32, flicken_slice[flicken.bytes.len + 1 ..][0..4], jump_back_offset, .little);

                // The jumps have to be in opposite directions.
                assert(math.sign(jump_to_offset) * math.sign(jump_back_offset) < 0);

                // Patched successfully; no need to try further candidate ranges.
                break;
            }
        }

        // Change pages back to R|X.
        var iter = pages_made_writable.keyIterator();
        const protection = posix.PROT.READ | posix.PROT.EXEC;
        while (iter.next()) |page_addr| {
            const ptr: [*]align(page_size) u8 = @ptrFromInt(page_addr.*);
            try posix.mprotect(ptr[0..page_size], protection);
        }

        log.info("patchRegion: Finished applying patches", .{});
    }
    // TODO: statistics
}
src/Range.zig (new file, 177 lines)
@@ -0,0 +1,177 @@
//! Represents a signed range with an inclusive lower bound and an exclusive upper bound.
//! An empty Range can be represented by `start == end`.

const std = @import("std");

const assert = std.debug.assert;

const Range = @This();

/// Inclusive lower bound of the range.
start: i64,
/// Exclusive upper bound of the range.
end: i64,

pub fn size(range: Range) u64 {
    assert(range.end >= range.start);
    // return @intCast(@as(i128, range.end) - range.start); // prevent overflow
    return @intCast(range.end - range.start);
}

pub fn alignTo(range: Range, alignment: u64) Range {
    assert(range.end >= range.start);
    assert(std.math.isPowerOfTwo(alignment));
    assert(alignment <= std.math.maxInt(i64));
    const lower = std.mem.alignBackward(i64, range.start, @intCast(alignment));
    const upper = std.mem.alignForward(i64, range.end, @intCast(alignment));
    assert(upper >= lower);
    return .{ .start = lower, .end = upper };
}

pub fn overlaps(range: Range, other: Range) bool {
    assert(range.end >= range.start);
    assert(other.end >= other.start);
    return range.start < other.end and other.start < range.end;
}

pub fn equals(range: Range, other: Range) bool {
    assert(range.end >= range.start);
    assert(other.end >= other.start);
    return range.start == other.start and range.end == other.end;
}

pub fn contains(range: Range, other: Range) bool {
    assert(range.end >= range.start);
    assert(other.end >= other.start);
    return range.start <= other.start and range.end >= other.end;
}

pub fn touches(range: Range, other: Range) bool {
    assert(range.end >= range.start);
    assert(other.end >= other.start);
    return range.start <= other.end and other.start <= range.end;
}

/// Ranges are considered equal if they touch.
pub fn compare(lhs: Range, rhs: Range) std.math.Order {
    assert(lhs.end >= lhs.start);
    assert(rhs.end >= rhs.start);
    return if (lhs.start > rhs.end) .gt else if (lhs.end < rhs.start) .lt else .eq;
}

pub fn getStart(range: Range, T: type) T {
    return @intCast(range.start);
}

pub fn getEnd(range: Range, T: type) T {
    return @intCast(range.end);
}

pub fn format(
    self: @This(),
    writer: *std.Io.Writer,
) std.Io.Writer.Error!void {
    try writer.print(".{{ .start = 0x{x}, .end = 0x{x} }}", .{ self.start, self.end });
}

test "AddressRange size" {
|
||||
const range = Range{ .start = 100, .end = 250 };
|
||||
try std.testing.expectEqual(@as(u64, 150), range.size());
|
||||
}
|
||||
|
||||
test "AddressRange alignTo unaligned" {
|
||||
const range = Range{ .start = 101, .end = 199 };
|
||||
const aligned = range.alignTo(16);
|
||||
try std.testing.expectEqual(@as(i64, 96), aligned.start);
|
||||
try std.testing.expectEqual(@as(i64, 208), aligned.end);
|
||||
}
|
||||
|
||||
test "AddressRange alignTo already aligned" {
|
||||
const range = Range{ .start = 64, .end = 128 };
|
||||
const aligned = range.alignTo(64);
|
||||
try std.testing.expectEqual(@as(i64, 64), aligned.start);
|
||||
try std.testing.expectEqual(@as(i64, 128), aligned.end);
|
||||
}
|
||||
|
||||
test "AddressRange no overlap before" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 0, .end = 100 };
|
||||
try std.testing.expect(!base.overlaps(other));
|
||||
}
|
||||
|
||||
test "AddressRange no overlap after" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 200, .end = 300 };
|
||||
try std.testing.expect(!base.overlaps(other));
|
||||
}
|
||||
|
||||
test "AddressRange overlap at start" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 50, .end = 150 };
|
||||
try std.testing.expect(base.overlaps(other));
|
||||
}
|
||||
|
||||
test "AddressRange overlap at end" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 150, .end = 250 };
|
||||
try std.testing.expect(base.overlaps(other));
|
||||
}
|
||||
|
||||
test "AddressRange overlap contained" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 120, .end = 180 };
|
||||
try std.testing.expect(base.overlaps(other));
|
||||
}
|
||||
|
||||
test "AddressRange overlap containing" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 50, .end = 250 };
|
||||
try std.testing.expect(base.overlaps(other));
|
||||
}
|
||||
|
||||
test "AddressRange overlap identical" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 100, .end = 200 };
|
||||
try std.testing.expect(base.overlaps(other));
|
||||
}
|
||||
|
||||
test "AddressRange touches before" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 0, .end = 100 };
|
||||
try std.testing.expect(base.touches(other));
|
||||
}
|
||||
|
||||
test "AddressRange touches after" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 200, .end = 300 };
|
||||
try std.testing.expect(base.touches(other));
|
||||
}
|
||||
|
||||
test "AddressRange touches at start" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 50, .end = 150 };
|
||||
try std.testing.expect(base.touches(other));
|
||||
}
|
||||
|
||||
test "AddressRange touches at end" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 150, .end = 250 };
|
||||
try std.testing.expect(base.touches(other));
|
||||
}
|
||||
|
||||
test "AddressRange touches contained" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 120, .end = 180 };
|
||||
try std.testing.expect(base.touches(other));
|
||||
}
|
||||
|
||||
test "AddressRange touches containing" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 50, .end = 250 };
|
||||
try std.testing.expect(base.touches(other));
|
||||
}
|
||||
|
||||
test "AddressRange touches identical" {
|
||||
const base = Range{ .start = 100, .end = 200 };
|
||||
const other = Range{ .start = 100, .end = 200 };
|
||||
try std.testing.expect(base.touches(other));
|
||||
}
|
||||
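
// A small ordering sketch: `compare` treats touching ranges as equal, which is what lets
// `std.sort.lowerBound` in AddressAllocator land on the first blocked range that touches a
// new one.
test "Range compare treats touching ranges as equal" {
    const a = Range{ .start = 0, .end = 100 };
    const b = Range{ .start = 100, .end = 200 };
    const c = Range{ .start = 201, .end = 300 };
    try std.testing.expectEqual(std.math.Order.eq, a.compare(b));
    try std.testing.expectEqual(std.math.Order.lt, a.compare(c));
    try std.testing.expectEqual(std.math.Order.gt, c.compare(a));
}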
src/disassembler.zig (new file, 134 lines)
@@ -0,0 +1,134 @@
const std = @import("std");
const mem = std.mem;
const zydis = @import("zydis").zydis;

const log = std.log.scoped(.disassembler);

pub const InstructionIterator = struct {
    decoder: zydis.ZydisDecoder,
    bytes: []const u8,
    instruction: zydis.ZydisDecodedInstruction,
    operands: [zydis.ZYDIS_MAX_OPERAND_COUNT]zydis.ZydisDecodedOperand,

    pub fn init(bytes: []const u8) InstructionIterator {
        var decoder: zydis.ZydisDecoder = undefined;
        const status = zydis.ZydisDecoderInit(
            &decoder,
            zydis.ZYDIS_MACHINE_MODE_LONG_64,
            zydis.ZYDIS_STACK_WIDTH_64,
        );
        if (!zydis.ZYAN_SUCCESS(status)) @panic("Zydis decoder init failed");
        return .{
            .decoder = decoder,
            .bytes = bytes,
            .instruction = undefined,
            .operands = undefined,
        };
    }

    pub fn next(iterator: *InstructionIterator) ?BundledInstruction {
        var status = zydis.ZydisDecoderDecodeFull(
            &iterator.decoder,
            iterator.bytes.ptr,
            iterator.bytes.len,
            &iterator.instruction,
            &iterator.operands,
        );
        var address: u64 = @intFromPtr(iterator.bytes.ptr);

        while (!zydis.ZYAN_SUCCESS(status)) {
            // TODO: handle common padding bytes
            switch (status) {
                zydis.ZYDIS_STATUS_NO_MORE_DATA => {
                    log.info("next: Got status: NO_MORE_DATA. Iterator completed.", .{});
                    return null;
                },
                zydis.ZYDIS_STATUS_ILLEGAL_LOCK => log.warn("next: Got status: ILLEGAL_LOCK. " ++
                    "Byte stepping to find the next valid instruction start", .{}),
                zydis.ZYDIS_STATUS_DECODING_ERROR => log.warn("next: Got status: DECODING_ERROR. " ++
                    "Byte stepping to find the next valid instruction start", .{}),
                else => log.warn("next: Got unknown status: 0x{x}. Byte stepping to find the " ++
                    "next valid instruction start", .{status}),
            }
            // TODO: add a flag to instead return an error
            log.debug(
                "next: instruction length: {}, address: 0x{x}, bytes: 0x{x}",
                .{
                    iterator.instruction.length,
                    address,
                    iterator.bytes[0..iterator.instruction.length],
                },
            );

            iterator.bytes = iterator.bytes[1..];
            status = zydis.ZydisDecoderDecodeFull(
                &iterator.decoder,
                iterator.bytes.ptr,
                iterator.bytes.len,
                &iterator.instruction,
                &iterator.operands,
            );
            address = @intFromPtr(iterator.bytes.ptr);
        }

        iterator.bytes = iterator.bytes[iterator.instruction.length..];
        return .{
            .address = address,
            .instruction = &iterator.instruction,
            .operands = iterator.operands[0..iterator.instruction.operand_count_visible],
        };
    }
};

pub const BundledInstruction = struct {
    address: u64,
    instruction: *const zydis.ZydisDecodedInstruction,
    operands: []const zydis.ZydisDecodedOperand,
};

pub const InstructionFormatter = struct {
    formatter: zydis.ZydisFormatter,

    pub fn init() InstructionFormatter {
        var formatter: zydis.ZydisFormatter = undefined;
        const status = zydis.ZydisFormatterInit(&formatter, zydis.ZYDIS_FORMATTER_STYLE_ATT);
        if (!zydis.ZYAN_SUCCESS(status)) @panic("Zydis formatter init failed");

        return .{
            .formatter = formatter,
        };
    }

    pub fn format(
        formatter: *const InstructionFormatter,
        instruction: BundledInstruction,
        buffer: []u8,
    ) []u8 {
        const status = zydis.ZydisFormatterFormatInstruction(
            &formatter.formatter,
            instruction.instruction,
            instruction.operands.ptr,
            instruction.instruction.operand_count_visible,
            buffer.ptr,
            buffer.len,
            instruction.address,
            null,
        );
        if (!zydis.ZYAN_SUCCESS(status)) {
            @panic("Zydis instruction formatting failed");
        }
        return mem.sliceTo(buffer, 0);
    }
};

/// Disassemble `bytes` and format them into the given buffer. Useful for error reporting or
/// debugging purposes.
/// This function should not be called in a tight loop as it is intentionally simple rather than
/// efficient.
pub fn formatBytes(bytes: []const u8, buffer: []u8) []u8 {
    var iter = InstructionIterator.init(bytes);

    const instr = iter.next() orelse return buffer[0..0];
    const formatter = InstructionFormatter.init();
    return formatter.format(instr, buffer);
}
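
// A minimal decoding sketch (assumes the vendored zydis build is linked, as for the code above):
// iterate over two known single-byte instructions.
test "usage sketch: iterate nop and ret" {
    const bytes = [_]u8{ 0x90, 0xc3 }; // nop; ret
    var iter = InstructionIterator.init(&bytes);

    const nop = iter.next().?;
    try std.testing.expectEqual(@as(u8, 1), nop.instruction.length);
    const ret = iter.next().?;
    try std.testing.expectEqual(@as(u8, 1), ret.instruction.length);
    try std.testing.expectEqual(null, iter.next());
}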
src/main.zig (new file, 279 lines)
@@ -0,0 +1,279 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
const elf = std.elf;
|
||||
const mem = std.mem;
|
||||
const posix = std.posix;
|
||||
const testing = std.testing;
|
||||
|
||||
const log = std.log.scoped(.flicker);
|
||||
const Patcher = @import("Patcher.zig");
|
||||
|
||||
const assert = std.debug.assert;
|
||||
|
||||
pub const std_options: std.Options = .{
|
||||
.log_level = .info,
|
||||
.log_scope_levels = &.{
|
||||
.{ .scope = .disassembler, .level = .info },
|
||||
.{ .scope = .patcher, .level = .debug },
|
||||
.{ .scope = .patch_location_iterator, .level = .warn },
|
||||
},
|
||||
};
|
||||
const page_size = std.heap.pageSize();
|
||||
const max_interp_path_length = 128;
|
||||
const help =
|
||||
\\Usage:
|
||||
\\ ./flicker [loader_flags] <executable> [args...]
|
||||
\\Flags:
|
||||
\\ -h print this help
|
||||
\\
|
||||
;
|
||||
|
||||
const UnfinishedReadError = error{UnfinishedRead};
|
||||
|
||||
var patcher: Patcher = undefined;
|
||||
|
||||
pub fn main() !void {
|
||||
// Parse arguments
|
||||
var arg_index: u64 = 1; // Skip own name
|
||||
while (arg_index < std.os.argv.len) : (arg_index += 1) {
|
||||
const arg = mem.sliceTo(std.os.argv[arg_index], '0');
|
||||
if (arg[0] != '-') break;
|
||||
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
|
||||
std.debug.print("{s}", .{help});
|
||||
return;
|
||||
}
|
||||
// TODO: Handle loader flags when/if we need them
|
||||
} else {
|
||||
std.debug.print("No executable given.\n", .{});
|
||||
std.debug.print("{s}", .{help});
|
||||
return;
|
||||
}
|
||||
|
||||
    // Initialize patcher
    patcher = try Patcher.init(std.heap.page_allocator); // TODO: allocator

    // Map file into memory
    const file = try lookupFile(mem.sliceTo(std.os.argv[arg_index], 0));
    var file_buffer: [128]u8 = undefined;
    var file_reader = file.reader(&file_buffer);
    log.info("--- Loading executable: {s} ---", .{std.os.argv[arg_index]});
    const ehdr = try elf.Header.read(&file_reader.interface);
    const base = try loadStaticElf(ehdr, &file_reader);
    const entry = ehdr.entry + if (ehdr.type == .DYN) base else 0;
    log.info("Executable loaded: base=0x{x}, entry=0x{x}", .{ base, entry });

    // Check for dynamic linker
    var maybe_interp_base: ?usize = null;
    var maybe_interp_entry: ?usize = null;
    var phdrs = ehdr.iterateProgramHeaders(&file_reader);
    while (try phdrs.next()) |phdr| {
        if (phdr.p_type != elf.PT_INTERP) continue;

        var interp_path: [max_interp_path_length]u8 = undefined;
        try file_reader.seekTo(phdr.p_offset);
        if (try file_reader.read(interp_path[0..phdr.p_filesz]) != phdr.p_filesz)
            return UnfinishedReadError.UnfinishedRead;
        assert(interp_path[phdr.p_filesz - 1] == 0); // Must be zero terminated
        log.info("Found interpreter path: {s}", .{interp_path[0 .. phdr.p_filesz - 1]});
        const interp = try std.fs.cwd().openFile(
            interp_path[0 .. phdr.p_filesz - 1],
            .{ .mode = .read_only },
        );

        log.info("--- Loading interpreter ---", .{});
        var interp_buffer: [128]u8 = undefined;
        var interp_reader = interp.reader(&interp_buffer);
        const interp_ehdr = try elf.Header.read(&interp_reader.interface);
        assert(interp_ehdr.type == elf.ET.DYN);
        const interp_base = try loadStaticElf(interp_ehdr, &interp_reader);
        maybe_interp_base = interp_base;
        maybe_interp_entry = interp_ehdr.entry + if (interp_ehdr.type == .DYN) interp_base else 0;
        log.info(
            "Interpreter loaded: base=0x{x}, entry=0x{x}",
            .{ interp_base, maybe_interp_entry.? },
        );
        interp.close();
    }

    var i: usize = 0;
    const auxv = std.os.linux.elf_aux_maybe.?;
    while (auxv[i].a_type != elf.AT_NULL) : (i += 1) {
        // TODO: look at other auxv types and check if we need to change them.
        auxv[i].a_un.a_val = switch (auxv[i].a_type) {
            elf.AT_PHDR => base + ehdr.phoff,
            elf.AT_PHENT => ehdr.phentsize,
            elf.AT_PHNUM => ehdr.phnum,
            elf.AT_BASE => maybe_interp_base orelse auxv[i].a_un.a_val,
            elf.AT_ENTRY => entry,
            elf.AT_EXECFN => @intFromPtr(std.os.argv[arg_index]),
            else => auxv[i].a_un.a_val,
        };
    }

    // The stack layout provided by the kernel is:
    //   argc, argv..., NULL, envp..., NULL, auxv...
    // We need to shift this block of memory to remove the loader's own arguments before we jump
    // to the new executable. A worked example is sketched below.
    // The end of the block is one entry past the AT_NULL entry in auxv.
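    // Worked example for a hypothetical invocation `./flicker prog a`, before the shift:
    //   [argc=3]["./flicker"]["prog"]["a"][NULL][envp...][NULL][auxv...]
    // and after copying everything from `&argv[arg_index]` down over `&argv[0]` and
    // rewriting argc:
    //   [argc=2]["prog"]["a"][NULL][envp...][NULL][auxv...]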
    const end_of_auxv = &auxv[i + 1];
    const dest_ptr = @as([*]u8, @ptrCast(std.os.argv.ptr));
    const src_ptr = @as([*]u8, @ptrCast(&std.os.argv[arg_index]));
    const len = @intFromPtr(end_of_auxv) - @intFromPtr(src_ptr);
    log.debug(
        "Copying stack from {*} to {*} with length 0x{x}",
        .{ src_ptr, dest_ptr, len },
    );
    assert(@intFromPtr(dest_ptr) < @intFromPtr(src_ptr));
    std.mem.copyForwards(u8, dest_ptr[0..len], src_ptr[0..len]);

    // `std.os.argv.ptr` points to the argv pointers. The word just before it is argc and also
    // the start of the stack.
    const argc: [*]usize = @as([*]usize, @ptrCast(@alignCast(&std.os.argv.ptr[0]))) - 1;
    argc[0] = std.os.argv.len - arg_index;
    log.debug("new argc: {x}", .{argc[0]});

    const final_entry = maybe_interp_entry orelse entry;
    log.info("Trampolining to final entry: 0x{x} with sp: {*}", .{ final_entry, argc });
    trampoline(final_entry, argc);
}

/// Loads all `PT_LOAD` segments of an ELF file into memory.
///
/// For `ET_EXEC` (non-PIE), segments are mapped at their fixed virtual addresses (`p_vaddr`).
/// For `ET_DYN` (PIE), segments are mapped at a random base address chosen by the kernel.
///
/// It handles zero-initialized (e.g., .bss) sections by mapping anonymous memory and only reading
/// `p_filesz` bytes from the file, ensuring `p_memsz` bytes are allocated.
fn loadStaticElf(ehdr: elf.Header, file_reader: *std.fs.File.Reader) !usize {
    // NOTE: In theory we could also just look at the first and last loadable segment because the
    // ELF spec mandates these to be in ascending order of `p_vaddr`, but better be safe than sorry.
    // https://gabi.xinuos.com/elf/08-pheader.html#:~:text=ascending%20order
    const minva, const maxva = bounds: {
        var minva: u64 = std.math.maxInt(u64);
        var maxva: u64 = 0;
        var phdrs = ehdr.iterateProgramHeaders(file_reader);
        while (try phdrs.next()) |phdr| {
            if (phdr.p_type != elf.PT_LOAD) continue;
            minva = @min(minva, phdr.p_vaddr);
            maxva = @max(maxva, phdr.p_vaddr + phdr.p_memsz);
        }
        minva = mem.alignBackward(usize, minva, page_size);
        maxva = mem.alignForward(usize, maxva, page_size);
        log.debug("Calculated bounds: minva=0x{x}, maxva=0x{x}", .{ minva, maxva });
        break :bounds .{ minva, maxva };
    };

    // Check that the needed memory region can be allocated as a whole. We do this with a
    // pre-flight reservation that is unmapped again right away; the individual segments are then
    // mapped with MAP_FIXED into the region the kernel just handed us.
    const dynamic = ehdr.type == elf.ET.DYN;
    log.debug("ELF type is {s}", .{if (dynamic) "DYN" else "EXEC (static)"});
    const hint = if (dynamic) null else @as(?[*]align(page_size) u8, @ptrFromInt(minva));
    log.debug("mmap pre-flight hint: {*}", .{hint});
    const base = try posix.mmap(
        hint,
        maxva - minva,
        posix.PROT.NONE,
        .{ .TYPE = .PRIVATE, .ANONYMOUS = true, .FIXED = !dynamic },
        -1,
        0,
    );
    log.debug("Pre-flight reservation at: {*}, size: 0x{x}", .{ base.ptr, base.len });
    posix.munmap(base);

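    // NOTE: Between the `munmap` above and the MAP_FIXED mappings below there is a short
    // window in which other code in this process could map into the reserved region; this
    // assumes the loader is single-threaded at this point.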
    const flags = posix.MAP{ .TYPE = .PRIVATE, .ANONYMOUS = true, .FIXED = true };
    var phdrs = ehdr.iterateProgramHeaders(file_reader);
    var phdr_idx: u32 = 0;
    errdefer posix.munmap(base);
    while (try phdrs.next()) |phdr| : (phdr_idx += 1) {
        if (phdr.p_type != elf.PT_LOAD) continue;
        if (phdr.p_memsz == 0) continue;

        const offset = phdr.p_vaddr & (page_size - 1);
        const size = mem.alignForward(usize, phdr.p_memsz + offset, page_size);
        var start = mem.alignBackward(usize, phdr.p_vaddr, page_size);
        const base_for_dyn = if (dynamic) @intFromPtr(base.ptr) else 0;
        start += base_for_dyn;
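        // Worked example (assuming page_size = 0x1000): p_vaddr = 0x401234 and
        // p_memsz = 0x2000 give offset = 0x234, start = 0x401000 (plus the base for
        // PIE), and size = alignForward(0x2234, 0x1000) = 0x3000.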
        log.debug(
            " - phdr[{}]: mapping 0x{x} bytes at 0x{x} (vaddr=0x{x}, dyn_base=0x{x})",
            .{ phdr_idx, size, start, phdr.p_vaddr, base_for_dyn },
        );
        // NOTE: We can't use a single file-backed mmap for the segment, because p_memsz may be
        // larger than p_filesz. This difference accounts for the .bss section, which must be
        // zero-initialized.
        const ptr = try posix.mmap(
            @as(?[*]align(page_size) u8, @ptrFromInt(start)),
            size,
            posix.PROT.WRITE,
            flags,
            -1,
            0,
        );
        try file_reader.seekTo(phdr.p_offset);
        if (try file_reader.read(ptr[offset..][0..phdr.p_filesz]) != phdr.p_filesz)
            return UnfinishedReadError.UnfinishedRead;

        const protections = elfToMmapProt(phdr.p_flags);
        if ((protections & posix.PROT.EXEC) != 0) {
            log.info("Patching executable segment", .{});
            try patcher.patchRegion(ptr);
        }
        try posix.mprotect(ptr, protections);
    }
    log.debug("loadStaticElf returning base: 0x{x}", .{@intFromPtr(base.ptr)});
    return @intFromPtr(base.ptr);
}

/// Converts ELF program header protection flags to mmap protection flags.
fn elfToMmapProt(elf_prot: u64) u32 {
    var result: u32 = posix.PROT.NONE;
    if ((elf_prot & elf.PF_R) != 0) result |= posix.PROT.READ;
    if ((elf_prot & elf.PF_W) != 0) result |= posix.PROT.WRITE;
    if ((elf_prot & elf.PF_X) != 0) result |= posix.PROT.EXEC;
    return result;
}
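
// A hypothetical sanity check, not part of the original commit, exercising the flag
// translation with the standard PF_*/PROT_* values.
test elfToMmapProt {
    try testing.expectEqual(posix.PROT.READ | posix.PROT.EXEC, elfToMmapProt(elf.PF_R | elf.PF_X));
    try testing.expectEqual(posix.PROT.NONE, elfToMmapProt(0));
}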

/// Opens the file by either opening it via an (absolute or relative) path or searching through
/// `PATH` for a file with that name.
// TODO: support paths starting with ~
fn lookupFile(path_or_name: []const u8) !std.fs.File {
    // If the filename contains a slash ("/"), then it is interpreted as a pathname.
    if (mem.indexOfScalar(u8, path_or_name, '/')) |_| {
        const fd = try posix.open(path_or_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
        return .{ .handle = fd };
    }

    // If it has no slash we need to look it up in PATH.
    if (posix.getenvZ("PATH")) |env_path| {
        var paths = mem.tokenizeScalar(u8, env_path, ':');
        while (paths.next()) |p| {
            var dir = std.fs.openDirAbsolute(p, .{}) catch continue;
            defer dir.close();
            const fd = posix.openat(dir.fd, path_or_name, .{
                .ACCMODE = .RDONLY,
                .CLOEXEC = true,
            }, 0) catch continue;
            return .{ .handle = fd };
        }
    }

    return error.FileNotFound;
}
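
// Usage sketch (hypothetical): the first call resolves via PATH, the second via a
// direct pathname.
//   const ls = try lookupFile("ls");
//   const sh = try lookupFile("/bin/sh");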

/// This function performs the final jump into the loaded program (amd64).
// TODO: support more architectures
fn trampoline(entry: usize, sp: [*]usize) noreturn {
    asm volatile (
        \\ mov %[sp], %%rsp
        \\ jmp *%[entry]
        : // No outputs
        : [entry] "r" (entry),
          [sp] "r" (sp),
        : .{ .rsp = true, .memory = true });
    unreachable;
}
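
// At the moment of the jump, `sp` must point at argc so that the stack matches the
// System V x86-64 ABI's initial process layout: [argc][argv...][NULL][envp...][NULL][auxv...].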

test {
    _ = @import("AddressAllocator.zig");
    _ = @import("Range.zig");
    _ = @import("PatchLocationIterator.zig");
}
54990
src/vendor/Zydis.c
vendored
Normal file
File diff suppressed because one or more lines are too long
12113
src/vendor/Zydis.h
vendored
Normal file
File diff suppressed because it is too large
4
src/vendor/zydis.zig
vendored
Normal file
@@ -0,0 +1,4 @@
// Separate module to always compile it with a release mode.
pub const zydis = @cImport({
    @cInclude("Zydis.h");
});
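
// How this module might be wired up in build.zig (a sketch under assumed names: `b` is
// the *std.Build, `exe` the main executable; the real build script is not part of this diff):
//   const zydis_mod = b.createModule(.{
//       .root_source_file = b.path("src/vendor/zydis.zig"),
//       .target = target,
//       .optimize = .ReleaseFast, // always optimized, independent of the main build mode
//   });
//   zydis_mod.addIncludePath(b.path("src/vendor"));
//   exe.root_module.addImport("zydis", zydis_mod);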