init
This commit is contained in:
73
LICENSE
Normal file
73
LICENSE
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright 2025 pzittlau
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
52
build.zig
Normal file
52
build.zig
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
const std = @import("std");

/// Declares the build graph for the `flicker` executable and its unit tests.
///
/// The vendored Zydis disassembler is compiled as its own module so that it is
/// always built optimized (`ReleaseFast`), independent of the optimize mode
/// chosen for the rest of the project.
pub fn build(b: *std.Build) !void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Vendored Zydis: separate module, always compiled with a release mode.
    const zydis_mod = b.createModule(.{
        .root_source_file = b.path("src/vendor/zydis.zig"),
        .optimize = .ReleaseFast,
        .target = target,
        .link_libc = false,
        .link_libcpp = false,
    });
    zydis_mod.addCMacro("ZYAN_NO_LIBC", "1");
    zydis_mod.addIncludePath(b.path("src/vendor/"));
    zydis_mod.addCSourceFile(.{
        .file = b.path("src/vendor/Zydis.c"),
        .flags = &.{"-g"},
        .language = .c,
    });

    // Root module of the application; pulls Zydis in under the name "zydis".
    const root_mod = b.createModule(.{
        .root_source_file = b.path("src/main.zig"),
        .optimize = optimize,
        .target = target,
        .link_libc = false,
        .link_libcpp = false,
        .imports = &.{.{ .name = "zydis", .module = zydis_mod }},
    });

    const exe = b.addExecutable(.{
        .name = "flicker",
        .root_module = root_mod,
    });
    exe.pie = true;
    b.installArtifact(exe);

    // `zig build run [-- args]`: run the installed executable.
    const run_cmd = b.addRunArtifact(exe);
    run_cmd.step.dependOn(b.getInstallStep());
    if (b.args) |args| run_cmd.addArgs(args);
    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);

    // `zig build test`: run the unit tests of the root module.
    const exe_tests = b.addTest(.{ .root_module = root_mod });
    const run_exe_tests = b.addRunArtifact(exe_tests);
    const test_step = b.step("test", "Run tests");
    test_step.dependOn(&run_exe_tests.step);
}
14
build.zig.zon
Normal file
14
build.zig.zon
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
// Package manifest (build.zig.zon) for the `flicker` project.
.{
    .name = .flicker,
    .version = "0.0.0",
    .fingerprint = 0x558e8f62ba3bc564, // Changing this has security and trust implications.
    .minimum_zig_version = "0.15.1",
    // No external packages; Zydis is vendored under src/vendor (see build.zig).
    .dependencies = .{},
    // Files and directories shipped when the package is fetched.
    .paths = .{
        "build.zig",
        "build.zig.zon",
        "src",
        "LICENSE",
        "README.md",
    },
}
||||||
424
src/AddressAllocator.zig
Normal file
424
src/AddressAllocator.zig
Normal file
@@ -0,0 +1,424 @@
|
|||||||
|
const std = @import("std");
|
||||||
|
const mem = std.mem;
|
||||||
|
const sort = std.sort;
|
||||||
|
const testing = std.testing;
|
||||||
|
|
||||||
|
const assert = std.debug.assert;
|
||||||
|
|
||||||
|
const Range = @import("Range.zig");
|
||||||
|
const log = std.log.scoped(.address_allocator);
|
||||||
|
|
||||||
|
// This file is a struct: an allocator over abstract (signed) address ranges.
const AddressAllocator = @This();

/// The **sorted** list of `Range`s that are blocked.
/// Invariant: entries are kept in ascending order and touching neighbors are
/// merged by `block`, so no two entries touch or overlap.
ranges: std.ArrayListUnmanaged(Range) = .empty,

/// An allocator with no blocked ranges; requires no allocation to create.
pub const empty = AddressAllocator{};
||||||
|
|
||||||
|
/// Releases the backing list of blocked ranges. `gpa` must be the same
/// allocator that was passed to `block`/`allocate`.
pub fn deinit(address_allocator: *AddressAllocator, gpa: mem.Allocator) void {
    address_allocator.ranges.deinit(gpa);
}
|
||||||
|
|
||||||
|
/// Block a range to not be used by the `allocate` function. This function will always succeed, if
/// there is enough memory available.
///
/// `range` is first widened to `alignment` (skipped when `alignment` is 0) and then inserted into
/// the sorted `ranges` list. Existing entries that the widened range overlaps or touches are
/// merged into a single entry, so the list stays sorted and free of touching neighbors.
pub fn block(
    address_allocator: *AddressAllocator,
    gpa: mem.Allocator,
    range: Range,
    alignment: u64,
) !void {
    assert(address_allocator.isSorted());
    defer assert(address_allocator.isSorted());

    // Widen to the requested alignment; the aligned range always covers the original one.
    const aligned_range = if (alignment != 0) range.alignTo(alignment) else range;
    assert(aligned_range.contains(range));
    if (aligned_range.size() == 0) return;

    // Find the correct sorted position to insert the new range.
    const insert_idx = sort.lowerBound(
        Range,
        address_allocator.ranges.items,
        aligned_range,
        Range.compare,
    );
    log.debug(
        "block: range: {}, alignment: {}, aligned_range: {}, insert_idx: {}",
        .{ range, alignment, aligned_range, insert_idx },
    );
    // If the new range is the greatest one OR if the entry at `insert_idx` is greater than the
    // new range, we can just insert.
    if (insert_idx == address_allocator.ranges.items.len or
        address_allocator.ranges.items[insert_idx].compare(aligned_range) == .gt)
    {
        log.debug("block: New range inserted", .{});
        return address_allocator.ranges.insert(gpa, insert_idx, aligned_range);
    }
    // The insert above was the only fallible operation; nothing below may fail.
    errdefer comptime unreachable;
    assert(address_allocator.ranges.items.len > 0);

    // Now `insert_idx` points to the first entry, that touches `aligned_range`.
    assert(address_allocator.ranges.items[insert_idx].touches(aligned_range));
    // NOTE(review): `insert_idx > 1` looks like an off-by-one — with `insert_idx == 1` the
    // predecessor at index 0 is never checked. This should probably be `insert_idx > 0`
    // (which would also make the `len > 1` clause redundant); confirm against `Range.compare`.
    if (insert_idx > 1 and address_allocator.ranges.items.len > 1) {
        assert(!address_allocator.ranges.items[insert_idx - 1].touches(aligned_range));
    }
    log.debug("block: `aligned_range` touches at least one existing range.", .{});

    // NOTE: We merge entries that touch eachother to speedup future traversals.
    // There are a few cases how to handle the merging:
    // 1. `aligned_range` is contained by the existing range. Then we have to do nothing and can
    //    return early.
    // 2. `aligned_range` contains the existing range. Then we have to overwrite `start` and `end`.
    // 3. The existing range is before `aligned_range`. Set `existing.end` to `aligned_range.end`.
    // 4. The existing range is after `aligned_range`. Set `existing.start` to `aligned.start`.
    // After we have done this to the first range that touches, we will loop over the other ones
    // that touch and just have to apply rule 4 repeatedly.
    const first = &address_allocator.ranges.items[insert_idx];
    if (first.contains(aligned_range)) {
        log.debug("block: Existing range at index {} contains new range. No-op", .{insert_idx});
        return;
    } else if (aligned_range.contains(first.*)) {
        log.debug(
            "block: New range contains existing range at index {}: {} -> {}",
            .{ insert_idx, first, aligned_range },
        );
        first.* = aligned_range;
    } else if (aligned_range.start <= first.end and aligned_range.end >= first.end) {
        // Case 3: the existing range begins before the new one; extend its end.
        assert(aligned_range.start > first.start);
        log.debug(
            "block: Adjusting range end at index {}: {} -> {}",
            .{ insert_idx, first.end, aligned_range.end },
        );
        first.*.end = aligned_range.end;
    } else if (aligned_range.end >= first.start and aligned_range.start <= first.start) {
        // Case 4: the existing range begins after the new one; extend its start backwards.
        assert(aligned_range.end < first.end);
        log.debug(
            "block: Adjusting range start at index {}: {} -> {}",
            .{ insert_idx, first.start, aligned_range.start },
        );
        first.*.start = aligned_range.start;
    } else {
        unreachable;
    }

    // The merged entry may now touch its successors: fold every touching successor into `first`
    // and remove it. Nothing to do when `first` is already the last entry.
    if (insert_idx >= address_allocator.ranges.items.len - 1) return;

    // FIX: The previous version took one pointer to the slot at `insert_idx + 1` before the loop
    // and looped on `neighbor.touches(...)` alone. Once every trailing entry had been merged and
    // removed, that pointer referenced a slot past the shrunk list (stale memory), and a further
    // `orderedRemove(insert_idx + 1)` would index out of bounds (e.g. existing [0,100],[200,300]
    // and blocking [50,250]). Re-check the length and re-take the pointer on every iteration.
    var i: u64 = 0;
    while (insert_idx + 1 < address_allocator.ranges.items.len) {
        const neighbor = &address_allocator.ranges.items[insert_idx + 1];
        if (!neighbor.touches(aligned_range)) break;
        assert(aligned_range.end >= neighbor.start);
        assert(aligned_range.start <= neighbor.start);

        if (neighbor.end > first.end) {
            log.debug(
                "block: Merging neighbor range at index {}: {} -> {}.",
                .{ insert_idx + 1, first.end, neighbor.end },
            );
            first.end = neighbor.end;
        }
        const removed = address_allocator.ranges.orderedRemove(insert_idx + 1);
        log.debug("block: Removed merged range: {}", .{removed});
        i += 1;
    }
    log.debug("block: Removed {} ranges.", .{i});
}
|
||||||
|
|
||||||
|
/// Allocate and block a `Range` of size `size` which will lie inside the given `valid_range`. If no
/// allocation of the given size is possible, return `null`.
///
/// First-fit scan: walks the sorted blocked `ranges` and returns the first gap within
/// `valid_range` that can hold `size` bytes; the returned range is immediately blocked.
pub fn allocate(
    address_allocator: *AddressAllocator,
    gpa: mem.Allocator,
    size: u64,
    valid_range: Range,
) !?Range {
    log.debug("allocate: Allocating size {} in range {}", .{ size, valid_range });
    if (valid_range.size() < size) return null;
    if (size == 0) return null;
    // Range bounds are signed (tests use negative starts), so gap math is done in i64.
    const size_i: i64 = @intCast(size);

    // OPTIM: Use binary search to find the start of the valid range inside the reserved ranges.

    // `candidate_start` tracks the beginning of the current free region being examined.
    var candidate_start = valid_range.start;
    for (address_allocator.ranges.items) |reserved| {
        if (candidate_start >= valid_range.end) {
            log.debug("allocate: Searched past the valid range.", .{});
            break;
        }

        // The potential allocation gap is before the current reserved block.
        if (candidate_start < reserved.start) {
            // Determine the actual available portion of the gap within our search `range`.
            const gap_end = @min(reserved.start, valid_range.end);
            if (gap_end >= candidate_start + size_i) {
                const new_range = Range{
                    .start = candidate_start,
                    .end = candidate_start + size_i,
                };
                // NOTE: `block` mutates `ranges` while we are iterating it, but we return
                // immediately afterwards, so the stale iteration state is never used.
                try address_allocator.block(gpa, new_range, 0);
                assert(valid_range.contains(new_range));
                log.debug("allocate: Found free gap: {}", .{new_range});
                return new_range;
            }
        }

        // The gap was not large enough. Move the candidate start past the current reserved block
        // for the next iteration.
        candidate_start = @max(candidate_start, reserved.end);
    }

    // Check the remaining space at the end of the search range.
    if (valid_range.end >= candidate_start + size_i) {
        const new_range = Range{
            .start = candidate_start,
            .end = candidate_start + size_i,
        };
        try address_allocator.block(gpa, new_range, 0);
        assert(valid_range.contains(new_range));
        log.debug("allocate: Found free gap at end: {}", .{new_range});
        return new_range;
    }

    log.debug("allocate: No suitable gap found.", .{});
    return null;
}
|
||||||
|
|
||||||
|
/// Debug helper: returns true when `ranges` is in strictly ascending order.
fn isSorted(address_allocator: *const AddressAllocator) bool {
    return sort.isSorted(Range, address_allocator.ranges.items, {}, isSortedInner);
}
fn isSortedInner(_: void, lhs: Range, rhs: Range) bool {
    // NOTE(review): assumes `Range.compare` returns .eq only for overlapping/touching
    // ranges, which the merge in `block` is supposed to rule out — confirm in Range.zig.
    return switch (lhs.compare(rhs)) {
        .lt => true,
        .gt => false,
        .eq => unreachable,
    };
}
|
||||||
|
|
||||||
|
// Two disjoint ranges stay as separate, sorted entries.
test "block basic" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);

    try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
    try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
    try testing.expectEqual(2, aa.ranges.items.len);
}

// A range blocked strictly between two existing entries is inserted in sorted order.
test "block in hole" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);

    try aa.block(testing.allocator, .{ .start = 400, .end = 500 }, 0);
    try testing.expectEqual(2, aa.ranges.items.len);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
    try testing.expectEqual(Range{ .start = 400, .end = 500 }, aa.ranges.items[1]);

    try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
    try testing.expectEqual(3, aa.ranges.items.len);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
    try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
    try testing.expectEqual(Range{ .start = 400, .end = 500 }, aa.ranges.items[2]);
}

// A range whose start touches an existing entry's end is merged into that entry.
test "block touch with previous" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try aa.block(testing.allocator, .{ .start = 100, .end = 200 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 200 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);

    try aa.block(testing.allocator, .{ .start = 100, .end = 300 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);

    try aa.block(testing.allocator, .{ .start = 300, .end = 400 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 400 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);
}

// A range whose end touches an existing entry's start extends that entry backwards.
// Also exercises negative addresses (Range bounds are signed).
test "block touch with following" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
    try aa.block(testing.allocator, .{ .start = 100, .end = 200 }, 0);
    try testing.expectEqual(Range{ .start = 100, .end = 300 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);

    try aa.block(testing.allocator, .{ .start = 0, .end = 200 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);

    try aa.block(testing.allocator, .{ .start = -100, .end = 0 }, 0);
    try testing.expectEqual(Range{ .start = -100, .end = 300 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);
}

// A range overlapping the end of one entry and the start of the next collapses both into one.
test "block overlap with previous and following" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
    try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
    try testing.expectEqual(2, aa.ranges.items.len);

    try aa.block(testing.allocator, .{ .start = 50, .end = 250 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);
}

// Blocking a range fully inside an existing entry is a no-op (merge case 1).
test "block contained by existing" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 100, .end = 300 }, 0);
    try aa.block(testing.allocator, .{ .start = 200, .end = 250 }, 0);
    try testing.expectEqual(Range{ .start = 100, .end = 300 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);
}

// Blocking a range that fully covers an existing entry replaces it (merge case 2).
test "block contains existing" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 50, .end = 100 }, 0);
    try aa.block(testing.allocator, .{ .start = 0, .end = 200 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 200 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);
}

// One blocked range swallowing several existing entries merges them all into one,
// including the final entry of the list (exercises the neighbor-merge loop).
test "block overlaps multiple" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try aa.block(testing.allocator, .{ .start = 150, .end = 200 }, 0);
    try aa.block(testing.allocator, .{ .start = 250, .end = 300 }, 0);
    try aa.block(testing.allocator, .{ .start = 350, .end = 400 }, 0);
    try aa.block(testing.allocator, .{ .start = 450, .end = 500 }, 0);
    try testing.expectEqual(5, aa.ranges.items.len);

    try aa.block(testing.allocator, .{ .start = 50, .end = 475 }, 0);
    try testing.expectEqual(Range{ .start = 0, .end = 500 }, aa.ranges.items[0]);
    try testing.expectEqual(1, aa.ranges.items.len);
}
|
||||||
|
|
||||||
|
// With no blocked ranges, allocation starts at the beginning of the search range.
test "allocate in empty allocator" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    const search_range = Range{ .start = 0, .end = 1000 };
    const allocated = try aa.allocate(testing.allocator, 100, search_range);
    try testing.expectEqual(1, aa.ranges.items.len);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, allocated);
}

// A fully blocked search range yields null.
test "allocate with no space" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    const range = Range{ .start = 0, .end = 1000 };
    try aa.block(testing.allocator, range, 0);
    const allocated = try aa.allocate(testing.allocator, 100, range);
    try testing.expect(allocated == null);
}

// First-fit: the allocation lands in the first gap, and merges with the preceding entry.
test "allocate in a gap" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);

    const search_range = Range{ .start = 0, .end = 1000 };
    const allocated = try aa.allocate(testing.allocator, 50, search_range);
    try testing.expectEqual(Range{ .start = 100, .end = 150 }, allocated);
    try testing.expectEqual(2, aa.ranges.items.len);
    try testing.expectEqual(Range{ .start = 0, .end = 150 }, aa.ranges.items[0]);
    try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
}

// When no interior gap fits, the space after the last blocked entry is used.
test "allocate at the end" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);

    const search_range = Range{ .start = 0, .end = 1000 };
    const allocated = try aa.allocate(testing.allocator, 200, search_range);
    try testing.expectEqual(Range{ .start = 100, .end = 300 }, allocated);
    try testing.expectEqual(1, aa.ranges.items.len);
    try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
}

// The allocation respects a search range that lies between existing blocked entries.
test "allocate within specific search range" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try aa.block(testing.allocator, .{ .start = 400, .end = 500 }, 0);

    // Search range starts after first block and has a gap
    const search_range = Range{ .start = 200, .end = 400 };
    const allocated = try aa.allocate(testing.allocator, 100, search_range);
    try testing.expectEqual(Range{ .start = 200, .end = 300 }, allocated);
    try testing.expectEqual(3, aa.ranges.items.len);
    try testing.expectEqual(Range{ .start = 0, .end = 100 }, aa.ranges.items[0]);
    try testing.expectEqual(Range{ .start = 400, .end = 500 }, aa.ranges.items[2]);
    try testing.expectEqual(Range{ .start = 200, .end = 300 }, aa.ranges.items[1]);
}

// A gap that matches the requested size exactly is usable; the result merges both neighbors.
test "allocate exact gap size" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);

    const search_range = Range{ .start = 0, .end = 1000 };
    const allocated = try aa.allocate(testing.allocator, 100, search_range);
    try testing.expectEqual(Range{ .start = 100, .end = 200 }, allocated);
    try testing.expectEqual(1, aa.ranges.items.len);
    try testing.expectEqual(Range{ .start = 0, .end = 300 }, aa.ranges.items[0]);
}

// A request one byte larger than every available gap must fail.
test "allocate fails when too large" {
    var aa = AddressAllocator{};
    defer aa.deinit(testing.allocator);

    try aa.block(testing.allocator, .{ .start = 0, .end = 100 }, 0);
    try aa.block(testing.allocator, .{ .start = 200, .end = 300 }, 0);

    const search_range = Range{ .start = 0, .end = 400 };
    const allocated = try aa.allocate(testing.allocator, 101, search_range);
    try std.testing.expect(allocated == null);
}
|
||||||
|
|
||||||
|
test "allocate with zero size" {
|
||||||
|
var aa = AddressAllocator{};
|
||||||
|
defer aa.deinit(testing.allocator);
|
||||||
|
|
||||||
|
const search_range = Range{ .start = 0, .end = 1000 };
|
||||||
|
const allocated = try aa.allocate(testing.allocator, 0, search_range);
|
||||||
|
try std.testing.expect(allocated == null);
|
||||||
|
}
|
||||||
|
|
||||||
|
test "allocate with size bigger than range" {
|
||||||
|
var aa = AddressAllocator{};
|
||||||
|
defer aa.deinit(testing.allocator);
|
||||||
|
|
||||||
|
const search_range = Range{ .start = 0, .end = 100 };
|
||||||
|
const allocated = try aa.allocate(testing.allocator, 1000, search_range);
|
||||||
|
try std.testing.expect(allocated == null);
|
||||||
|
}
|
||||||
435
src/PatchLocationIterator.zig
Normal file
435
src/PatchLocationIterator.zig
Normal file
@@ -0,0 +1,435 @@
|
|||||||
|
//! Iterates through all possible valid address ranges for a `jmp rel32` instruction based on a
|
||||||
|
//! 4-byte pattern of "free" and "used" bytes.
|
||||||
|
//!
|
||||||
|
//! This is the core utility for implementing E9Patch-style instruction punning (B2) and padded
|
||||||
|
//! jumps (T1).
|
||||||
|
const std = @import("std");
|
||||||
|
const testing = std.testing;
|
||||||
|
const assert = std.debug.assert;
|
||||||
|
|
||||||
|
const log = std.log.scoped(.patch_location_iterator);
|
||||||
|
|
||||||
|
const Range = @import("Range.zig");
|
||||||
|
|
||||||
|
/// One byte of the 4-byte `rel32` offset pattern.
pub const PatchByte = union(enum) {
    /// Unconstrained: the byte may take any value (0x00-0xFF).
    free: void,
    /// Constrained: the byte must equal this exact value.
    used: u8,

    pub fn format(self: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
        switch (self) {
            .free => try writer.print("free", .{}),
            .used => |byte| try writer.print("used({x})", .{byte}),
        }
    }
};
|
||||||
|
|
||||||
|
const patch_size = 4;
|
||||||
|
const PatchInt = std.meta.Int(.signed, patch_size * 8);
|
||||||
|
const PatchLocationIterator = @This();
|
||||||
|
/// The base address (e.g., RIP of the *next* instruction) that the 32-bit relative offset is
|
||||||
|
/// calculated from.
|
||||||
|
offset: i64,
|
||||||
|
/// The 4-byte little-endian pattern of `used` and `free` bytes that constrain the `rel32` offset.
|
||||||
|
patch_bytes: [patch_size]PatchByte,
|
||||||
|
/// Internal state: the byte-level representation of the *start* of the current `rel32` offset being
|
||||||
|
/// iterated.
|
||||||
|
start: [patch_size]u8,
|
||||||
|
/// Internal state: the byte-level representation of the *end* of the current `rel32` offset being
|
||||||
|
/// iterated.
|
||||||
|
end: [patch_size]u8,
|
||||||
|
/// Internal state: flag to handle the first call to `next()` uniquely.
|
||||||
|
first: bool,
|
||||||
|
/// Internal state: optimization cache for the number of contiguous `.free` bytes at the *end* of
|
||||||
|
/// `patch_bytes`.
|
||||||
|
trailing_free_count: u8,
|
||||||
|
|
||||||
|
/// Initializes the iterator.
/// - `patch_bytes`: The 4-byte pattern of the `rel32` offset, in little-endian order.
/// - `offset`: The address of the *next* instruction (i.e., `RIP` after the 5-byte `jmp`).
///   All returned ranges will be relative to this offset.
pub fn init(patch_bytes: [patch_size]PatchByte, offset: u64) PatchLocationIterator {
    // Count the contiguous `.free` bytes at the start of the array. Since the
    // pattern is little-endian these are the low-order bytes of the offset,
    // so each such byte may range over 0x00-0xff within a single range.
    // (The original tautological `assert(patch_bytes.len == patch_size)` was
    // dropped: the parameter type already guarantees the length; likewise a
    // leftover `log.debug("hi", ...)` debug statement was removed.)
    var trailing_free: u8 = 0;
    for (patch_bytes) |byte| {
        if (byte != .free) break;
        trailing_free += 1;
    }

    // Build the byte-level lower/upper bounds of the first range:
    //  - trailing free bytes span 0x00..0xff,
    //  - used bytes are pinned to their value,
    //  - inner free bytes start at 0 and are stepped by `next()`.
    var start = std.mem.zeroes([patch_size]u8);
    var end = std.mem.zeroes([patch_size]u8);
    for (patch_bytes, 0..) |byte, i| {
        switch (byte) {
            .free => {
                start[i] = 0;
                end[i] = if (i < trailing_free) 0xff else 0;
            },
            .used => |val| {
                start[i] = val;
                end[i] = val;
            },
        }
    }

    const out = PatchLocationIterator{
        .offset = @intCast(offset),
        .patch_bytes = patch_bytes,
        .trailing_free_count = trailing_free,
        .start = start,
        .end = end,
        .first = true,
    };
    log.debug("init: {f}", .{out});
    return out;
}
|
||||||
|
|
||||||
|
/// Returns the next valid `Range` of target addresses, or `null` if the iteration is complete.
///
/// Fix: the three log statements below previously formatted `range` with `{}`;
/// `Range` declares a custom `format` method, which in this std.Io.Writer-era
/// Zig requires the `{f}` specifier (the rest of the file already uses `{f}`).
pub fn next(self: *PatchLocationIterator) ?Range {
    // After this call the "first call" special cases no longer apply.
    defer self.first = false;

    // If the most significant byte is predetermined with its sign bit set, the
    // rel32 offset would always be negative — nothing to iterate.
    const last_byte = self.patch_bytes[patch_size - 1];
    if (last_byte == .used and last_byte.used & 0x80 != 0) {
        log.info(
            "next: Search aborted, pattern has predetermined negative offset (last_byte=0x{x})",
            .{last_byte.used},
        );
        return null;
    }

    // If all bytes are free we can just return the maximum positive range once.
    if (self.trailing_free_count == patch_size) {
        if (self.first) {
            const range = Range{ .start = self.offset, .end = self.offset + std.math.maxInt(i32) };
            log.debug("next: All bytes free, returning full positive range: {f}", .{range});
            return range;
        } else {
            log.info("next: All bytes free, iteration finished.", .{});
            return null;
        }
    }

    // First call: the bounds computed by `init` are the first range.
    if (self.first) {
        const range = Range{
            .start = std.mem.readInt(PatchInt, self.start[0..], .little) + self.offset,
            .end = std.mem.readInt(PatchInt, self.end[0..], .little) + self.offset,
        };
        log.debug("next: First call, returning initial range: {f}", .{range});
        return range;
    }

    // Ripple-carry increment across the *inner* free bytes (skipping the
    // trailing free bytes, which already span 0x00..0xff, and the pinned
    // used bytes). The most significant byte is capped at 0x7f so the
    // resulting signed offset stays non-negative.
    var overflow: u1 = 1;
    for (self.patch_bytes, 0..) |byte, i| {
        if (i < self.trailing_free_count or byte == .used) {
            continue;
        }
        assert(byte == .free);
        assert(self.start[i] == self.end[i]);
        defer assert(self.start[i] == self.end[i]);

        if (overflow == 1) {
            const max: u8 = if (i < patch_size - 1) std.math.maxInt(u8) else std.math.maxInt(i8);
            if (self.start[i] == max) {
                // Carry into the next inner free byte.
                self.start[i] = 0;
                self.end[i] = 0;
            } else {
                self.start[i] += 1;
                self.end[i] += 1;
                overflow = 0;
            }
        }
    }
    // A carry out of the most significant free byte means we wrapped: done.
    if (overflow == 1) {
        log.info("next: Iteration finished, no more ranges.", .{});
        return null;
    }

    const start = std.mem.readInt(PatchInt, self.start[0..], .little);
    const end = std.mem.readInt(PatchInt, self.end[0..], .little);
    assert(end >= start);
    const range = Range{
        .start = start + self.offset,
        .end = end + self.offset,
    };
    log.debug("next: new range: {f}", .{range});
    return range;
}
|
||||||
|
|
||||||
|
/// Debug representation of the iterator's configuration and internal state.
pub fn format(self: PatchLocationIterator, writer: *std.Io.Writer) std.Io.Writer.Error!void {
    try writer.print(".{{ .offset = {x}, ", .{self.offset});
    const pb = self.patch_bytes;
    try writer.print(
        ".patch_bytes = .{{ {f}, {f}, {f}, {f} }}, ",
        .{ pb[0], pb[1], pb[2], pb[3] },
    );
    try writer.print(
        ".start: 0x{x}, .end: 0x{x}, first: {}, trailing_free_count: {}",
        .{ self.start, self.end, self.first, self.trailing_free_count },
    );
}
|
||||||
|
|
||||||
|
test "free bytes" {
    var iter = PatchLocationIterator.init(.{ .free, .free, .free, .free }, 0);

    try testing.expectEqual(
        Range{ .start = 0x00000000, .end = 0x7fffffff },
        iter.next().?,
    );
    try testing.expectEqual(null, iter.next());
}

test "predetermined negative" {
    // 0xe9 in the most significant byte has the sign bit set, so the offset
    // would always be negative — iteration yields nothing, for any base offset.
    const pattern = [_]PatchByte{ .free, .free, .free, .{ .used = 0xe9 } };
    var iter = PatchLocationIterator.init(pattern, 0);
    try testing.expectEqual(null, iter.next());
    iter = PatchLocationIterator.init(pattern, 0x12345678);
    try testing.expectEqual(null, iter.next());
}

test "trailing free bytes" {
    var iter = PatchLocationIterator.init(.{ .free, .free, .free, .{ .used = 0x79 } }, 0);

    try testing.expectEqual(
        Range{ .start = 0x79000000, .end = 0x79ffffff },
        iter.next().?,
    );
    try testing.expectEqual(null, iter.next());
}

test "inner and trailing free bytes" {
    var iter = PatchLocationIterator.init(
        .{ .free, .{ .used = 0xe8 }, .free, .{ .used = 0x79 } },
        0,
    );

    try testing.expectEqual(
        Range{ .start = 0x7900e800, .end = 0x7900e8ff },
        iter.next().?,
    );
    try testing.expectEqual(
        Range{ .start = 0x7901e800, .end = 0x7901e8ff },
        iter.next().?,
    );

    // Drain the iterator, remembering the last range.
    var last: ?Range = null;
    var seen: u32 = 2; // We already consumed two
    while (iter.next()) |r| : (seen += 1) {
        last = r;
    }
    try testing.expectEqual(
        Range{ .start = 0x79ffe800, .end = 0x79ffe8ff },
        last,
    );
    try testing.expectEqual(256, seen);
}

test "no free bytes" {
    var iter = PatchLocationIterator.init(
        .{ .{ .used = 0xe9 }, .{ .used = 0x00 }, .{ .used = 0x00 }, .{ .used = 0x78 } },
        0,
    );

    // Fully pinned pattern: exactly one single-value range.
    try testing.expectEqual(
        Range{ .start = 0x780000e9, .end = 0x780000e9 },
        iter.next().?,
    );
    try testing.expectEqual(null, iter.next());
}

test "inner and leading free bytes" {
    var iter = PatchLocationIterator.init(
        .{ .{ .used = 0xe9 }, .free, .{ .used = 0xe8 }, .free },
        0,
    );

    try testing.expectEqual(
        Range{ .start = 0x00e800e9, .end = 0x00e800e9 },
        iter.next().?,
    );
    try testing.expectEqual(
        Range{ .start = 0x00e801e9, .end = 0x00e801e9 },
        iter.next().?,
    );

    // Drain the iterator, remembering the last range.
    var last: ?Range = null;
    var seen: u32 = 2; // We already consumed two
    while (iter.next()) |r| : (seen += 1) {
        last = r;
    }
    try testing.expectEqual(
        Range{ .start = 0x7fe8ffe9, .end = 0x7fe8ffe9 },
        last,
    );
    // Inner byte spans 256 values, most significant byte only 128 (sign bit).
    try testing.expectEqual(256 * 128, seen);
}

test "only inner" {
    var iter = PatchLocationIterator.init(
        .{ .{ .used = 0xe9 }, .free, .free, .{ .used = 0x78 } },
        0,
    );

    try testing.expectEqual(
        Range{ .start = 0x780000e9, .end = 0x780000e9 },
        iter.next().?,
    );
    try testing.expectEqual(
        Range{ .start = 0x780001e9, .end = 0x780001e9 },
        iter.next().?,
    );

    // Drain the iterator, remembering the last range.
    var last: ?Range = null;
    var seen: u32 = 2; // We already consumed two
    while (iter.next()) |r| : (seen += 1) {
        last = r;
    }
    try testing.expectEqual(
        Range{ .start = 0x78ffffe9, .end = 0x78ffffe9 },
        last,
    );
    try testing.expectEqual(256 * 256, seen);
}

test "trailing free bytes offset" {
    const base = 0x12345678;
    var iter = PatchLocationIterator.init(.{ .free, .free, .free, .{ .used = 0x79 } }, base);

    try testing.expectEqual(
        Range{ .start = base + 0x79000000, .end = base + 0x79ffffff },
        iter.next().?,
    );
    try testing.expectEqual(null, iter.next());
}

test "trailing and leading offset" {
    const base = 0x12345678;
    var iter = PatchLocationIterator.init(
        .{ .free, .{ .used = 0xe9 }, .{ .used = 0xe8 }, .free },
        base,
    );

    try testing.expectEqual(
        Range{ .start = base + 0x00e8e900, .end = base + 0x00e8e9ff },
        iter.next().?,
    );
    try testing.expectEqual(
        Range{ .start = base + 0x01e8e900, .end = base + 0x01e8e9ff },
        iter.next().?,
    );

    // Drain the iterator, remembering the last range.
    var last: ?Range = null;
    var seen: u32 = 2; // We already consumed two
    while (iter.next()) |r| : (seen += 1) {
        last = r;
    }
    try testing.expectEqual(
        Range{ .start = base + 0x7fe8e900, .end = base + 0x7fe8e9ff },
        last,
    );
    try testing.expectEqual(128, seen);
}

test "trailing free bytes large offset" {
    const base = 0x12345678;
    var iter = PatchLocationIterator.init(.{ .free, .free, .free, .{ .used = 0x79 } }, base);

    try testing.expectEqual(
        Range{ .start = base + 0x79000000, .end = base + 0x79ffffff },
        iter.next().?,
    );
    try testing.expectEqual(null, iter.next());
}

test "trailing and leading large offset" {
    const base = 0x12345678;
    var iter = PatchLocationIterator.init(
        .{ .free, .{ .used = 0xe9 }, .{ .used = 0xe8 }, .free },
        base,
    );

    try testing.expectEqual(
        Range{ .start = base + 0x00e8e900, .end = base + 0x00e8e9ff },
        iter.next().?,
    );
    try testing.expectEqual(
        Range{ .start = base + 0x01e8e900, .end = base + 0x01e8e9ff },
        iter.next().?,
    );

    // Drain the iterator, remembering the last range.
    var last: ?Range = null;
    var seen: u32 = 2; // We already consumed two
    while (iter.next()) |r| : (seen += 1) {
        last = r;
    }
    try testing.expectEqual(
        Range{ .start = base + 0x7fe8e900, .end = base + 0x7fe8e9ff },
        last,
    );
    try testing.expectEqual(128, seen);
}
|
||||||
272
src/Patcher.zig
Normal file
272
src/Patcher.zig
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
const std = @import("std");
|
||||||
|
const builtin = @import("builtin");
|
||||||
|
const testing = std.testing;
|
||||||
|
const math = std.math;
|
||||||
|
const mem = std.mem;
|
||||||
|
const posix = std.posix;
|
||||||
|
const zydis = @import("zydis").zydis;
|
||||||
|
const disassembler = @import("disassembler.zig");
|
||||||
|
|
||||||
|
const log = std.log.scoped(.patcher);
|
||||||
|
const AddressAllocator = @import("AddressAllocator.zig");
|
||||||
|
const InstructionFormatter = disassembler.InstructionFormatter;
|
||||||
|
const InstructionIterator = disassembler.InstructionIterator;
|
||||||
|
const PatchLocationIterator = @import("PatchLocationIterator.zig");
|
||||||
|
const Range = @import("Range.zig");
|
||||||
|
|
||||||
|
const assert = std.debug.assert;
|
||||||
|
|
||||||
|
const page_size = 4096;
|
||||||
|
const jump_rel32: u8 = 0xe9;
|
||||||
|
const jump_rel32_size = 5;
|
||||||
|
const jump_rel8: u8 = 0xeb;
|
||||||
|
const jump_rel8_size = 2;
|
||||||
|
const max_ins_bytes = 15;
|
||||||
|
// Based on the paper 'x86-64 Instruction Usage among C/C++ Applications' by 'Akshintala et al.'
|
||||||
|
// it's '4.25' bytes, so 4 is good enough. (https://oscarlab.github.io/papers/instrpop-systor19.pdf)
|
||||||
|
const avg_ins_bytes = 4;
|
||||||
|
|
||||||
|
// TODO: Find an invalid instruction to use.
|
||||||
|
// const invalid: u8 = 0xaa;
|
||||||
|
const int3: u8 = 0xcc;
|
||||||
|
|
||||||
|
// Prefixes for Padded Jumps (Tactic T1)
|
||||||
|
const prefix_fs: u8 = 0x64;
|
||||||
|
const prefix_gs: u8 = 0x65;
|
||||||
|
const prefix_ss: u8 = 0x36;
|
||||||
|
const prefixes = [_]u8{ prefix_fs, prefix_gs, prefix_ss };
|
||||||
|
|
||||||
|
const Patcher = @This();
|
||||||
|
|
||||||
|
gpa: mem.Allocator,
|
||||||
|
flicken: std.StringArrayHashMapUnmanaged(Flicken) = .empty,
|
||||||
|
address_allocator: AddressAllocator = .empty,
|
||||||
|
/// Tracks the base addresses of pages we have mmap'd for Flicken.
|
||||||
|
allocated_pages: std.AutoHashMapUnmanaged(u64, void) = .empty,
|
||||||
|
|
||||||
|
/// Creates a patcher. The reserved "nop" flicken is registered at ID 0.
pub fn init(gpa: mem.Allocator) !Patcher {
    var table: std.StringArrayHashMapUnmanaged(Flicken) = .empty;
    try table.ensureTotalCapacity(gpa, 8);
    // ID 0 is always the built-in no-op flicken (see `FlickenId.nop`).
    table.putAssumeCapacity("nop", .{ .name = "nop", .bytes = &.{} });
    return .{
        .gpa = gpa,
        .flicken = table,
    };
}
|
||||||
|
|
||||||
|
/// Releases all memory owned by the patcher.
/// Fix: the previous implementation was `_ = patcher;`, leaking the flicken
/// table, the address allocator, and the allocated-pages set.
/// NOTE(review): pages mmap'd during `patchRegion` stay mapped — unmapping
/// them would pull live trampolines out from under patched code.
pub fn deinit(patcher: *Patcher) void {
    patcher.flicken.deinit(patcher.gpa);
    patcher.address_allocator.deinit(patcher.gpa);
    patcher.allocated_pages.deinit(patcher.gpa);
    patcher.* = undefined;
}
|
||||||
|
|
||||||
|
/// Flicken name and bytes have to be valid for the lifetime it's used. If a trampoline with the
/// name is already registered it gets overwritten.
/// NOTE: The name "nop" is reserved and always has the ID 0.
/// Fix: the overwrite warning previously logged the stale prefix
/// "addTrampoline:" instead of this function's name.
pub fn addFlicken(patcher: *Patcher, trampoline: Flicken) !FlickenId {
    assert(!mem.eql(u8, "nop", trampoline.name));
    // Reserve capacity up front; everything after this point cannot fail.
    try patcher.flicken.ensureUnusedCapacity(patcher.gpa, 1);
    errdefer comptime unreachable;

    const gop = patcher.flicken.getOrPutAssumeCapacity(trampoline.name);
    if (gop.found_existing) {
        log.warn("addFlicken: Overwriting existing flicken: {s}", .{trampoline.name});
    }
    gop.key_ptr.* = trampoline.name;
    gop.value_ptr.* = trampoline;
    // The array-hash-map index is stable and doubles as the flicken's ID.
    return @enumFromInt(gop.index);
}
|
||||||
|
|
||||||
|
/// A registered trampoline payload: raw machine-code bytes plus its name.
pub const Flicken = struct {
    name: []const u8,
    bytes: []const u8,

    /// Total bytes the flicken occupies once placed: the payload followed by
    /// the `jmp rel32` that returns to the patched instruction.
    pub fn size(self: *const Flicken) u64 {
        return self.bytes.len + jump_rel32_size;
    }
};
|
||||||
|
|
||||||
|
pub const FlickenId = enum(u64) { nop = 0, _ };
|
||||||
|
|
||||||
|
/// A single instruction to be redirected through a flicken.
pub const PatchRequest = struct {
    /// Which registered flicken to route this instruction through.
    flicken: FlickenId,
    /// Bytes of the instruction; must point to the first byte of an
    /// instruction. The slice's pointer doubles as the instruction address.
    bytes: []u8,

    /// Orders requests by descending instruction address (for back-to-front patching).
    pub fn desc(_: void, lhs: PatchRequest, rhs: PatchRequest) bool {
        const lhs_addr = @intFromPtr(lhs.bytes.ptr);
        const rhs_addr = @intFromPtr(rhs.bytes.ptr);
        return lhs_addr > rhs_addr;
    }

    pub fn format(
        self: @This(),
        writer: *std.Io.Writer,
    ) std.Io.Writer.Error!void {
        try writer.print(
            ".{{ .address = 0x{x}, .bytes = 0x{x}, .flicken = {} }}",
            .{ @intFromPtr(self.bytes.ptr), self.bytes, @intFromEnum(self.flicken) },
        );
    }
};
|
||||||
|
|
||||||
|
/// Scans `region` for instructions to patch, allocates flicken trampolines in
/// reachable memory, and rewrites each selected instruction into a
/// `jmp rel32` to its flicken (which ends with a `jmp rel32` back).
/// `region` must be page-aligned executable code; it is temporarily remapped
/// R|W and restored to R|X before returning.
pub fn patchRegion(patcher: *Patcher, region: []align(page_size) u8) !void {
    {
        // Block the region, such that we don't try to allocate there anymore.
        const start: i64 = @intCast(@intFromPtr(region.ptr));
        try patcher.address_allocator.block(
            patcher.gpa,
            .{ .start = start, .end = start + @as(i64, @intCast(region.len)) },
            page_size,
        );
    }

    // All per-call scratch (the request list) lives in this arena.
    var arena_impl = std.heap.ArenaAllocator.init(patcher.gpa);
    const arena = arena_impl.allocator();
    defer arena_impl.deinit();
    var patch_requests: std.ArrayListUnmanaged(PatchRequest) = .empty;

    {
        // Get where to patch: currently every instruction carrying a LOCK
        // prefix (per zydis attributes) is selected, routed to the nop flicken.
        var instruction_iterator = InstructionIterator.init(region);
        while (instruction_iterator.next()) |instruction| {
            const should_patch: bool = instruction.instruction.attributes & zydis.ZYDIS_ATTRIB_HAS_LOCK > 0;
            if (should_patch) {
                const start = instruction.address - @intFromPtr(region.ptr);
                const request: PatchRequest = .{
                    .bytes = region[start..][0..instruction.instruction.length],
                    .flicken = .nop,
                };
                try patch_requests.append(arena, request);
            }
        }
        log.info("patchRegion: Got {} patch requests", .{patch_requests.items.len});
    }

    // Sort patch requests in descending order by address, such that we patch from back to front.
    mem.sortUnstable(PatchRequest, patch_requests.items, {}, PatchRequest.desc);

    {
        // Check for duplicate patch requests and undefined IDs. After sorting,
        // duplicates are adjacent, so comparing each request to its
        // predecessor suffices.
        var last_address: ?[*]u8 = null;
        for (patch_requests.items, 0..) |request, i| {
            if (last_address) |last| {
                if (last == request.bytes.ptr) {
                    var buffer: [256]u8 = undefined;
                    const fmt = disassembler.formatBytes(request.bytes, &buffer);
                    log.err(
                        "patchRegion: Found duplicate patch requests for instruction: {s}",
                        .{fmt},
                    );
                    log.err("patchRegion: request 1: {f}", .{patch_requests.items[i - 1]});
                    log.err("patchRegion: request 2: {f}", .{patch_requests.items[i]});
                    return error.DuplicatePatchRequest;
                }
            }
            last_address = request.bytes.ptr;

            // Flicken IDs are array-hash-map indices, so any ID >= count is undefined.
            if (@as(u64, @intFromEnum(request.flicken)) >= patcher.flicken.count()) {
                var buffer: [256]u8 = undefined;
                const fmt = disassembler.formatBytes(request.bytes, &buffer);
                log.err(
                    "patchRegion: Usage of undefined flicken in request {f} for instruction: {s}",
                    .{ request, fmt },
                );
                return error.undefinedFlicken;
            }
        }
    }

    {
        // Apply patches.
        try posix.mprotect(region, posix.PROT.READ | posix.PROT.WRITE);
        defer posix.mprotect(region, posix.PROT.READ | posix.PROT.EXEC) catch
            @panic("patchRegion: mprotect back to R|X failed. Can't continue");

        // PERF: A set of the pages for the patches/flicken we made writable. This way we don't
        // repeatedly change call `mprotect` on the same page to switch it from R|W to R|X and back.
        // At the end we `mprotect` all pages in this set back to being R|X.
        var pages_made_writable: std.AutoHashMapUnmanaged(u64, void) = .empty;
        for (patch_requests.items) |request| {
            const flicken = patcher.flicken.entries.get(@intFromEnum(request.flicken)).value;
            // Instructions shorter than a jmp rel32 need punning/padded-jump
            // tactics that are not implemented yet.
            if (request.bytes.len < 5) continue; // TODO:

            // All four rel32 bytes are unconstrained here, so the iterator
            // yields the full positive displacement range from this address.
            var iter = PatchLocationIterator.init(
                .{ .free, .free, .free, .free },
                @intFromPtr(request.bytes.ptr),
            );
            // NOTE(review): there is no `break` after a successful patch. With
            // the all-free pattern the iterator yields a single range so the
            // loop runs at most once usefully, but a constrained pattern would
            // allocate and patch repeatedly — confirm before changing the pattern.
            while (iter.next()) |valid_range| {
                const patch_range = try patcher.address_allocator.allocate(
                    patcher.gpa,
                    flicken.size(),
                    valid_range,
                ) orelse continue;
                assert(patch_range.size() == flicken.size());

                {
                    // Map patch_range as R|W.
                    // NOTE(review): `end` is exclusive; aligning it *backward*
                    // can include one page past the last written byte when the
                    // range ends exactly on a page boundary — harmless but
                    // confirm intended.
                    const start_page = mem.alignBackward(u64, patch_range.getStart(u64), page_size);
                    const end_page = mem.alignBackward(u64, patch_range.getEnd(u64), page_size);
                    const protection = posix.PROT.READ | posix.PROT.WRITE;
                    var page_addr = start_page;
                    while (page_addr <= end_page) : (page_addr += page_size) {
                        // If the page is already writable we don't need to do anything;
                        if (pages_made_writable.get(page_addr)) |_| continue;

                        // Pages we mapped in a previous call are remembered in
                        // `allocated_pages`; only unknown pages get a fresh mmap.
                        const gop = try patcher.allocated_pages.getOrPut(patcher.gpa, page_addr);
                        if (gop.found_existing) {
                            const ptr: [*]align(page_size) u8 = @ptrFromInt(page_addr);
                            try posix.mprotect(ptr[0..page_size], protection);
                        } else {
                            // FIXED_NOREPLACE: fail rather than clobber an
                            // existing mapping at this address.
                            const addr = try posix.mmap(
                                @ptrFromInt(page_addr),
                                page_size,
                                protection,
                                .{ .TYPE = .PRIVATE, .ANONYMOUS = true, .FIXED_NOREPLACE = true },
                                -1,
                                0,
                            );
                            assert(@as(u64, @intFromPtr(addr.ptr)) == page_addr);
                            // `gop.value_ptr.* = {};` is not needed because it's void.
                        }
                        try pages_made_writable.put(patcher.gpa, page_addr, {});
                    }
                }

                const flicken_addr: [*]u8 = @ptrFromInt(patch_range.getStart(u64));
                const flicken_slice = flicken_addr[0 .. flicken.bytes.len + 5];

                // Displacement from the end of the 5-byte jmp we write into
                // the original instruction to the start of the flicken.
                const jump_to_offset: i32 = blk: {
                    const from: i64 = @intCast(@intFromPtr(request.bytes.ptr) + jump_rel32_size);
                    const to = patch_range.start;
                    break :blk @intCast(to - from);
                };
                // Overwrite the original instruction: jmp rel32, then pad any
                // remaining bytes with int3 so a stray jump into them traps.
                request.bytes[0] = jump_rel32;
                mem.writeInt(i32, request.bytes[1..5], jump_to_offset, .little);
                for (request.bytes[5..]) |*b| {
                    b.* = int3;
                }

                // Displacement from the end of the flicken's trailing jmp back
                // to the instruction following the patched one.
                const jump_back_offset: i32 = blk: {
                    const from = patch_range.end;
                    const to: i64 = @intCast(@intFromPtr(request.bytes.ptr) + request.bytes.len);
                    break :blk @intCast(to - from);
                };
                // Write payload, then the return jmp directly after it.
                @memcpy(flicken_addr, flicken.bytes);
                flicken_slice[flicken.bytes.len] = jump_rel32;
                mem.writeInt(i32, flicken_slice[flicken.bytes.len + 1 ..][0..4], jump_back_offset, .little);

                // The jumps have to be in the opposite direction.
                assert(math.sign(jump_to_offset) * math.sign(jump_back_offset) < 0);
            }
        }

        // Change pages back to R|X.
        var iter = pages_made_writable.keyIterator();
        const protection = posix.PROT.READ | posix.PROT.EXEC;
        while (iter.next()) |page_addr| {
            const ptr: [*]align(page_size) u8 = @ptrFromInt(page_addr.*);
            try posix.mprotect(ptr[0..page_size], protection);
        }

        log.info("patchRegion: Finished applying patches", .{});
    }
    // TODO: statistics
}
|
||||||
177
src/Range.zig
Normal file
177
src/Range.zig
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
//! Represents some kind of signed range with an inclusive lower bound and an exclusive upper bound.
|
||||||
|
//! An empty Range can be represented by `start == end`.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
|
||||||
|
const assert = std.debug.assert;
|
||||||
|
|
||||||
|
const Range = @This();
|
||||||
|
/// Inclusive lower bound of the range.
|
||||||
|
start: i64,
|
||||||
|
/// Exclusive upper bound of the range.
|
||||||
|
end: i64,
|
||||||
|
|
||||||
|
/// Number of addresses covered by the range (`end` is exclusive).
/// Fix: the subtraction is widened to i128 so ranges spanning most of the
/// i64 address space (e.g. very negative start, very positive end) cannot
/// overflow — this was already sketched in a commented-out line.
pub fn size(range: Range) u64 {
    assert(range.end >= range.start);
    return @intCast(@as(i128, range.end) - range.start);
}
|
||||||
|
|
||||||
|
/// Widens the range outward so both bounds land on `alignment` boundaries:
/// `start` is rounded down, `end` is rounded up.
pub fn alignTo(range: Range, alignment: u64) Range {
    assert(range.end >= range.start);
    assert(std.math.isPowerOfTwo(alignment));
    assert(alignment <= std.math.maxInt(i64));
    const a: i64 = @intCast(alignment);
    const lower = std.mem.alignBackward(i64, range.start, a);
    const upper = std.mem.alignForward(i64, range.end, a);
    assert(upper >= lower);
    return .{ .start = lower, .end = upper };
}
|
||||||
|
|
||||||
|
/// True when the two ranges share at least one address (ends are exclusive).
pub fn overlaps(range: Range, other: Range) bool {
    assert(range.end >= range.start);
    assert(other.end >= other.start);
    // Negation of "one range ends at or before the other starts".
    return !(other.end <= range.start or range.end <= other.start);
}

/// True when both bounds match exactly.
pub fn equals(range: Range, other: Range) bool {
    assert(range.end >= range.start);
    assert(other.end >= other.start);
    return std.meta.eql(range, other);
}

/// True when `other` lies entirely within `range`.
pub fn contains(range: Range, other: Range) bool {
    assert(range.end >= range.start);
    assert(other.end >= other.start);
    const starts_inside = range.start <= other.start;
    const ends_inside = other.end <= range.end;
    return starts_inside and ends_inside;
}

/// Like `overlaps`, but also true when the ranges are merely adjacent.
pub fn touches(range: Range, other: Range) bool {
    assert(range.end >= range.start);
    assert(other.end >= other.start);
    return !(range.start > other.end or other.start > range.end);
}
|
||||||
|
|
||||||
|
/// Orders two ranges for searching/sorting.
/// Ranges are considered equal if they touch.
pub fn compare(lhs: Range, rhs: Range) std.math.Order {
    assert(lhs.end >= lhs.start);
    assert(rhs.end >= rhs.start);
    // `lhs` strictly after `rhs` (with a gap) => greater.
    if (lhs.start > rhs.end) return .gt;
    // `lhs` strictly before `rhs` (with a gap) => less.
    if (lhs.end < rhs.start) return .lt;
    // Overlapping or adjacent ranges compare as equal.
    return .eq;
}
|
||||||
|
|
||||||
|
/// Returns `start` converted to `T`.
/// Safety-checked cast: traps in safe builds if the value does not fit in `T`.
pub fn getStart(range: Range, T: type) T {
    return @intCast(range.start);
}
|
||||||
|
|
||||||
|
/// Returns `end` converted to `T`.
/// Safety-checked cast: traps in safe builds if the value does not fit in `T`.
pub fn getEnd(range: Range, T: type) T {
    return @intCast(range.end);
}
|
||||||
|
|
||||||
|
/// Formats the range as a struct literal with hexadecimal bounds,
/// e.g. `.{ .start = 0x64, .end = 0xfa }`.
pub fn format(
    self: @This(),
    writer: *std.Io.Writer,
) std.Io.Writer.Error!void {
    // `{{`/`}}` escape literal braces in the format string.
    try writer.print(".{{ .start = 0x{x}, .end = 0x{x} }}", .{ self.start, self.end });
}
|
||||||
|
|
||||||
|
// `size` counts the integers in the half-open interval [start, end).
test "AddressRange size" {
    const range = Range{ .start = 100, .end = 250 };
    try std.testing.expectEqual(@as(u64, 150), range.size());
}

// Unaligned bounds are widened outward: start rounds down, end rounds up.
test "AddressRange alignTo unaligned" {
    const range = Range{ .start = 101, .end = 199 };
    const aligned = range.alignTo(16);
    try std.testing.expectEqual(@as(i64, 96), aligned.start);
    try std.testing.expectEqual(@as(i64, 208), aligned.end);
}

// Bounds already on the alignment boundary are left untouched.
test "AddressRange alignTo already aligned" {
    const range = Range{ .start = 64, .end = 128 };
    const aligned = range.alignTo(64);
    try std.testing.expectEqual(@as(i64, 64), aligned.start);
    try std.testing.expectEqual(@as(i64, 128), aligned.end);
}
|
||||||
|
|
||||||
|
// `overlaps` tests: half-open semantics, so sharing only a boundary point
// (e.g. [0,100) vs [100,200)) is NOT an overlap.
test "AddressRange no overlap before" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 0, .end = 100 };
    try std.testing.expect(!base.overlaps(other));
}

test "AddressRange no overlap after" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 200, .end = 300 };
    try std.testing.expect(!base.overlaps(other));
}

test "AddressRange overlap at start" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 50, .end = 150 };
    try std.testing.expect(base.overlaps(other));
}

test "AddressRange overlap at end" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 150, .end = 250 };
    try std.testing.expect(base.overlaps(other));
}

// `other` strictly inside `base`.
test "AddressRange overlap contained" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 120, .end = 180 };
    try std.testing.expect(base.overlaps(other));
}

// `other` strictly encloses `base`.
test "AddressRange overlap containing" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 50, .end = 250 };
    try std.testing.expect(base.overlaps(other));
}

test "AddressRange overlap identical" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 100, .end = 200 };
    try std.testing.expect(base.overlaps(other));
}
|
||||||
|
|
||||||
|
// `touches` tests: unlike `overlaps`, adjacency at a boundary DOES count.
test "AddressRange touches before" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 0, .end = 100 };
    try std.testing.expect(base.touches(other));
}

test "AddressRange touches after" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 200, .end = 300 };
    try std.testing.expect(base.touches(other));
}

test "AddressRange touches at start" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 50, .end = 150 };
    try std.testing.expect(base.touches(other));
}

test "AddressRange touches at end" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 150, .end = 250 };
    try std.testing.expect(base.touches(other));
}

// `other` strictly inside `base`.
test "AddressRange touches contained" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 120, .end = 180 };
    try std.testing.expect(base.touches(other));
}

// `other` strictly encloses `base`.
test "AddressRange touches containing" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 50, .end = 250 };
    try std.testing.expect(base.touches(other));
}

test "AddressRange touches identical" {
    const base = Range{ .start = 100, .end = 200 };
    const other = Range{ .start = 100, .end = 200 };
    try std.testing.expect(base.touches(other));
}
|
||||||
134
src/disassembler.zig
Normal file
134
src/disassembler.zig
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
const std = @import("std");
|
||||||
|
const mem = std.mem;
|
||||||
|
const zydis = @import("zydis").zydis;
|
||||||
|
|
||||||
|
const log = std.log.scoped(.disassembler);
|
||||||
|
|
||||||
|
/// Iterates over the x86-64 instructions contained in a byte slice using the
/// Zydis decoder. On undecodable bytes the iterator resynchronizes by stepping
/// forward one byte at a time until the next valid instruction start.
pub const InstructionIterator = struct {
    decoder: zydis.ZydisDecoder,
    // Remaining, not-yet-decoded bytes; shrinks as `next` is called.
    bytes: []const u8,
    // Scratch storage reused by every `next` call; `BundledInstruction`
    // results point into these two fields and are invalidated by the next call.
    instruction: zydis.ZydisDecodedInstruction,
    operands: [zydis.ZYDIS_MAX_OPERAND_COUNT]zydis.ZydisDecodedOperand,

    /// Creates an iterator over `bytes`, configured for 64-bit long mode.
    /// Panics if the Zydis decoder cannot be initialized.
    pub fn init(bytes: []const u8) InstructionIterator {
        var decoder: zydis.ZydisDecoder = undefined;
        const status = zydis.ZydisDecoderInit(
            &decoder,
            zydis.ZYDIS_MACHINE_MODE_LONG_64,
            zydis.ZYDIS_STACK_WIDTH_64,
        );
        if (!zydis.ZYAN_SUCCESS(status)) @panic("Zydis decoder init failed");
        return .{
            .decoder = decoder,
            .bytes = bytes,
            // Filled in by the first call to `next`.
            .instruction = undefined,
            .operands = undefined,
        };
    }

    /// Decodes and returns the next instruction, or null when the input is
    /// exhausted. Undecodable bytes are skipped one at a time (with a warning)
    /// until decoding succeeds or no data remains.
    pub fn next(iterator: *InstructionIterator) ?BundledInstruction {
        var status = zydis.ZydisDecoderDecodeFull(
            &iterator.decoder,
            iterator.bytes.ptr,
            iterator.bytes.len,
            &iterator.instruction,
            &iterator.operands,
        );
        // The reported address is the in-process address of the bytes being
        // decoded, not a guest/file virtual address.
        var address: u64 = @intFromPtr(iterator.bytes.ptr);

        // Resynchronization loop: runs only while decoding fails.
        while (!zydis.ZYAN_SUCCESS(status)) {
            // TODO: handle common padding bytes
            switch (status) {
                zydis.ZYDIS_STATUS_NO_MORE_DATA => {
                    log.info("next: Got status: NO_MORE_DATA. Iterator completed.", .{});
                    return null;
                },
                zydis.ZYDIS_STATUS_ILLEGAL_LOCK => log.warn("next: Got status: ILLEGAL_LOCK. " ++
                    "Byte stepping, to find next valid instruction begin", .{}),
                zydis.ZYDIS_STATUS_DECODING_ERROR => log.warn("next: Got status: DECODING_ERROR. " ++
                    "Byte stepping, to find next valid instruction begin", .{}),
                else => log.warn("next: Got unknown status: 0x{x}. Byte stepping, to find next " ++
                    "valid instruction begin", .{status}),
            }
            // TODO: add a flag to instead return an error
            // NOTE(review): the decode just failed, so `instruction.length`
            // logged here may be stale from a previous decode — verify intent.
            log.debug(
                "next: instruction length: {}, address: 0x{x}, bytes: 0x{x}",
                .{
                    iterator.instruction.length,
                    address,
                    iterator.bytes[0..iterator.instruction.length],
                },
            );

            // Skip a single byte and retry the decode from the new position.
            iterator.bytes = iterator.bytes[1..];
            status = zydis.ZydisDecoderDecodeFull(
                &iterator.decoder,
                iterator.bytes.ptr,
                iterator.bytes.len,
                &iterator.instruction,
                &iterator.operands,
            );
            address = @intFromPtr(iterator.bytes.ptr);
        }

        // Consume the decoded instruction's bytes before returning it.
        iterator.bytes = iterator.bytes[iterator.instruction.length..];
        return .{
            .address = address,
            .instruction = &iterator.instruction,
            // Expose only the operands visible in assembly syntax.
            .operands = iterator.operands[0..iterator.instruction.operand_count_visible],
        };
    }
};
|
||||||
|
|
||||||
|
/// A decoded instruction bundled with its location and visible operands.
pub const BundledInstruction = struct {
    // In-process address the instruction was decoded from.
    address: u64,
    // Points into the owning `InstructionIterator`'s scratch storage;
    // invalidated by the iterator's next `next()` call.
    instruction: *const zydis.ZydisDecodedInstruction,
    // Visible operands only; also points into the owning iterator.
    operands: []const zydis.ZydisDecodedOperand,
};
|
||||||
|
|
||||||
|
/// Wraps a Zydis formatter configured for AT&T syntax, turning decoded
/// instructions into human-readable text.
pub const InstructionFormatter = struct {
    formatter: zydis.ZydisFormatter,

    /// Initializes the underlying Zydis formatter (AT&T style).
    /// Panics if Zydis rejects the initialization.
    pub fn init() InstructionFormatter {
        var formatter: zydis.ZydisFormatter = undefined;
        const status = zydis.ZydisFormatterInit(&formatter, zydis.ZYDIS_FORMATTER_STYLE_ATT);
        if (!zydis.ZYAN_SUCCESS(status)) @panic("Zydis formatter init failed");

        return .{
            .formatter = formatter,
        };
    }

    /// Formats `instruction` into `buffer` and returns the written,
    /// zero-terminated prefix of `buffer`.
    /// Panics if Zydis fails to format (e.g. the buffer is too small).
    pub fn format(
        formatter: *const InstructionFormatter,
        instruction: BundledInstruction,
        buffer: []u8,
    ) []u8 {
        const status = zydis.ZydisFormatterFormatInstruction(
            &formatter.formatter,
            instruction.instruction,
            instruction.operands.ptr,
            instruction.instruction.operand_count_visible,
            buffer.ptr,
            buffer.len,
            // Used by Zydis to render rip-relative targets as absolute addresses.
            instruction.address,
            null,
        );
        if (!zydis.ZYAN_SUCCESS(status)) {
            // Fixed: was a placeholder @panic("wow"); now consistent with `init`.
            @panic("Zydis instruction formatting failed");
        }
        // Zydis writes a NUL-terminated string; trim to the terminator.
        return mem.sliceTo(buffer, 0);
    }
};
|
||||||
|
|
||||||
|
/// Disassemble `bytes` and format them into the given buffer. Useful for error reporting or
/// debugging purposes.
/// This function should not be called in a tight loop as it's intentionally inefficient due to
/// having a simple API (a fresh decoder and formatter are created on every call).
pub fn formatBytes(bytes: []const u8, buffer: []u8) []u8 {
    var iter = InstructionIterator.init(bytes);

    // Only the first decodable instruction is formatted; an empty slice means
    // `bytes` contained no decodable instruction at all.
    const instr = iter.next() orelse return buffer[0..0];
    const formatter = InstructionFormatter.init();
    return formatter.format(instr, buffer);
}
|
||||||
279
src/main.zig
Normal file
279
src/main.zig
Normal file
@@ -0,0 +1,279 @@
|
|||||||
|
const std = @import("std");
|
||||||
|
const builtin = @import("builtin");
|
||||||
|
|
||||||
|
const elf = std.elf;
|
||||||
|
const mem = std.mem;
|
||||||
|
const posix = std.posix;
|
||||||
|
const testing = std.testing;
|
||||||
|
|
||||||
|
const log = std.log.scoped(.flicker);
|
||||||
|
const Patcher = @import("Patcher.zig");
|
||||||
|
|
||||||
|
const assert = std.debug.assert;
|
||||||
|
|
||||||
|
// Per-scope log levels for the loader's subsystems.
pub const std_options: std.Options = .{
    .log_level = .info,
    .log_scope_levels = &.{
        .{ .scope = .disassembler, .level = .info },
        .{ .scope = .patcher, .level = .debug },
        .{ .scope = .patch_location_iterator, .level = .warn },
    },
};
// Host page size; all segment mappings are aligned to this.
const page_size = std.heap.pageSize();
// Maximum supported length (including the NUL terminator) of a PT_INTERP path.
const max_interp_path_length = 128;
const help =
    \\Usage:
    \\ ./flicker [loader_flags] <executable> [args...]
    \\Flags:
    \\ -h print this help
    \\
;

// Returned when a read delivers fewer bytes than the ELF headers promised.
const UnfinishedReadError = error{UnfinishedRead};

// Global patcher instance; initialized in `main` before any segment is loaded.
var patcher: Patcher = undefined;
|
||||||
|
|
||||||
|
/// Entry point: parses loader flags, loads the target executable (and its
/// dynamic linker, if it has one), rewrites the auxiliary vector and shifts
/// the initial stack so the target sees a normal process start, then jumps to
/// the final entry point. Never returns on success.
pub fn main() !void {
    // Parse arguments
    var arg_index: u64 = 1; // Skip own name
    while (arg_index < std.os.argv.len) : (arg_index += 1) {
        // BUGFIX: slice to the NUL terminator (0), not the character '0' —
        // the old code truncated any argument containing a zero digit.
        const arg = mem.sliceTo(std.os.argv[arg_index], 0);
        if (arg[0] != '-') break;
        if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
            std.debug.print("{s}", .{help});
            return;
        }
        // TODO: Handle loader flags when/if we need them
    } else {
        // Loop ran to completion without `break`: every argument was a flag.
        std.debug.print("No executable given.\n", .{});
        std.debug.print("{s}", .{help});
        return;
    }

    // Initialize patcher
    patcher = try Patcher.init(std.heap.page_allocator); // TODO: allocator

    // Map file into memory
    const file = try lookupFile(mem.sliceTo(std.os.argv[arg_index], 0));
    var file_buffer: [128]u8 = undefined;
    var file_reader = file.reader(&file_buffer);
    log.info("--- Loading executable: {s} ---", .{std.os.argv[arg_index]});
    const ehdr = try elf.Header.read(&file_reader.interface);
    const base = try loadStaticElf(ehdr, &file_reader);
    // PIE (ET_DYN) entry points are relative to the load base.
    const entry = ehdr.entry + if (ehdr.type == .DYN) base else 0;
    log.info("Executable loaded: base=0x{x}, entry=0x{x}", .{ base, entry });

    // Check for dynamic linker
    var maybe_interp_base: ?usize = null;
    var maybe_interp_entry: ?usize = null;
    var phdrs = ehdr.iterateProgramHeaders(&file_reader);
    while (try phdrs.next()) |phdr| {
        if (phdr.p_type != elf.PT_INTERP) continue;

        var interp_path: [max_interp_path_length]u8 = undefined;
        try file_reader.seekTo(phdr.p_offset);
        if (try file_reader.read(interp_path[0..phdr.p_filesz]) != phdr.p_filesz)
            return UnfinishedReadError.UnfinishedRead;
        assert(interp_path[phdr.p_filesz - 1] == 0); // Must be zero terminated
        log.info("Found interpreter path: {s}", .{interp_path[0 .. phdr.p_filesz - 1]});
        const interp = try std.fs.cwd().openFile(
            interp_path[0 .. phdr.p_filesz - 1],
            .{ .mode = .read_only },
        );

        log.info("--- Loading interpreter ---", .{});
        var interp_buffer: [128]u8 = undefined;
        var interp_reader = interp.reader(&interp_buffer);
        const interp_ehdr = try elf.Header.read(&interp_reader.interface);
        // The dynamic linker itself must be position independent.
        assert(interp_ehdr.type == elf.ET.DYN);
        const interp_base = try loadStaticElf(interp_ehdr, &interp_reader);
        maybe_interp_base = interp_base;
        maybe_interp_entry = interp_ehdr.entry + if (interp_ehdr.type == .DYN) interp_base else 0;
        log.info(
            "Interpreter loaded: base=0x{x}, entry=0x{x}",
            .{ interp_base, maybe_interp_entry.? },
        );
        interp.close();
    }

    // Rewrite the auxiliary vector so the target (or its interpreter) sees
    // values describing the loaded executable instead of this loader.
    var i: usize = 0;
    const auxv = std.os.linux.elf_aux_maybe.?;
    while (auxv[i].a_type != elf.AT_NULL) : (i += 1) {
        // TODO: look at other auxv types and check if we need to change them.
        auxv[i].a_un.a_val = switch (auxv[i].a_type) {
            elf.AT_PHDR => base + ehdr.phoff,
            elf.AT_PHENT => ehdr.phentsize,
            elf.AT_PHNUM => ehdr.phnum,
            elf.AT_BASE => maybe_interp_base orelse auxv[i].a_un.a_val,
            elf.AT_ENTRY => entry,
            elf.AT_EXECFN => @intFromPtr(std.os.argv[arg_index]),
            else => auxv[i].a_un.a_val,
        };
    }

    // The stack layout provided by the kernel is:
    // argc, argv..., NULL, envp..., NULL, auxv...
    // We need to shift this block of memory to remove the loader's own arguments before we jump to
    // the new executable.
    // The end of the block is one entry past the AT_NULL entry in auxv.
    const end_of_auxv = &auxv[i + 1];
    const dest_ptr = @as([*]u8, @ptrCast(std.os.argv.ptr));
    const src_ptr = @as([*]u8, @ptrCast(&std.os.argv[arg_index]));
    const len = @intFromPtr(end_of_auxv) - @intFromPtr(src_ptr);
    log.debug(
        "Copying stack from {*} to {*} with length 0x{x}",
        .{ src_ptr, dest_ptr, len },
    );
    // Shifting towards lower addresses, so a forward copy is safe for overlap.
    assert(@intFromPtr(dest_ptr) < @intFromPtr(src_ptr));
    std.mem.copyForwards(u8, dest_ptr[0..len], src_ptr[0..len]);

    // `std.os.argv.ptr` points to the argv pointers. The word just before it is argc and also the
    // start of the stack.
    const argc: [*]usize = @as([*]usize, @ptrCast(@alignCast(&std.os.argv.ptr[0]))) - 1;
    argc[0] = std.os.argv.len - arg_index;
    log.debug("new argc: {x}", .{argc[0]});

    // Prefer the interpreter's entry when one was loaded; it will in turn
    // jump to the executable's entry (AT_ENTRY).
    const final_entry = maybe_interp_entry orelse entry;
    log.info("Trampolining to final entry: 0x{x} with sp: {*}", .{ final_entry, argc });
    trampoline(final_entry, argc);
}
|
||||||
|
|
||||||
|
/// Loads all `PT_LOAD` segments of an ELF file into memory.
///
/// For `ET_EXEC` (non-PIE), segments are mapped at their fixed virtual addresses (`p_vaddr`).
/// For `ET_DYN` (PIE), segments are mapped at a random base address chosen by the kernel.
///
/// It handles zero-initialized (e.g., .bss) sections by mapping anonymous memory and only reading
/// `p_filesz` bytes from the file, ensuring `p_memsz` bytes are allocated.
///
/// Returns the chosen load base address. On error, the reserved region is unmapped.
fn loadStaticElf(ehdr: elf.Header, file_reader: *std.fs.File.Reader) !usize {
    // NOTE: In theory we could also just look at the first and last loadable segment because the
    // ELF spec mandates these to be in ascending order of `p_vaddr`, but better be safe than sorry.
    // https://gabi.xinuos.com/elf/08-pheader.html#:~:text=ascending%20order
    const minva, const maxva = bounds: {
        var minva: u64 = std.math.maxInt(u64);
        var maxva: u64 = 0;
        var phdrs = ehdr.iterateProgramHeaders(file_reader);
        while (try phdrs.next()) |phdr| {
            if (phdr.p_type != elf.PT_LOAD) continue;
            minva = @min(minva, phdr.p_vaddr);
            maxva = @max(maxva, phdr.p_vaddr + phdr.p_memsz);
        }
        // Widen the bounds to whole pages.
        minva = mem.alignBackward(usize, minva, page_size);
        maxva = mem.alignForward(usize, maxva, page_size);
        log.debug("Calculated bounds: minva=0x{x}, maxva=0x{x}", .{ minva, maxva });
        break :bounds .{ minva, maxva };
    };

    // Check that the needed memory region can be allocated as a whole. We do this by reserving the
    // full span with a PROT_NONE mapping first and unmapping it again before mapping each segment.
    const dynamic = ehdr.type == elf.ET.DYN;
    log.debug("ELF type is {s}", .{if (dynamic) "DYN" else "EXEC (static)"});
    // For ET_EXEC the mapping must land exactly at `minva`; for ET_DYN the
    // kernel picks the base.
    const hint = if (dynamic) null else @as(?[*]align(page_size) u8, @ptrFromInt(minva));
    log.debug("mmap pre-flight hint: {*}", .{hint});
    const base = try posix.mmap(
        hint,
        maxva - minva,
        posix.PROT.NONE,
        .{ .TYPE = .PRIVATE, .ANONYMOUS = true, .FIXED = !dynamic },
        -1,
        0,
    );
    log.debug("Pre-flight reservation at: {*}, size: 0x{x}", .{ base.ptr, base.len });
    // NOTE(review): unmapping before the per-segment maps leaves a window
    // where another mapping could land in the span — verify this is acceptable.
    posix.munmap(base);

    const flags = posix.MAP{ .TYPE = .PRIVATE, .ANONYMOUS = true, .FIXED = true };
    var phdrs = ehdr.iterateProgramHeaders(file_reader);
    var phdr_idx: u32 = 0;
    errdefer posix.munmap(base);
    while (try phdrs.next()) |phdr| : (phdr_idx += 1) {
        if (phdr.p_type != elf.PT_LOAD) continue;
        if (phdr.p_memsz == 0) continue;

        // Offset of the segment within its first page.
        const offset = phdr.p_vaddr & (page_size - 1);
        const size = mem.alignForward(usize, phdr.p_memsz + offset, page_size);
        var start = mem.alignBackward(usize, phdr.p_vaddr, page_size);
        const base_for_dyn = if (dynamic) @intFromPtr(base.ptr) else 0;
        start += base_for_dyn;
        log.debug(
            " - phdr[{}]: mapping 0x{x} bytes at 0x{x} (vaddr=0x{x}, dyn_base=0x{x})",
            .{ phdr_idx, size, start, phdr.p_vaddr, base_for_dyn },
        );
        // NOTE: We can't use a single file-backed mmap for the segment, because p_memsz may be
        // larger than p_filesz. This difference accounts for the .bss section, which must be
        // zero-initialized.
        const ptr = try posix.mmap(
            @as(?[*]align(page_size) u8, @ptrFromInt(start)),
            size,
            posix.PROT.WRITE,
            flags,
            -1,
            0,
        );
        // Copy the file-backed portion; the rest stays zeroed (anonymous map).
        try file_reader.seekTo(phdr.p_offset);
        if (try file_reader.read(ptr[offset..][0..phdr.p_filesz]) != phdr.p_filesz)
            return UnfinishedReadError.UnfinishedRead;

        const protections = elfToMmapProt(phdr.p_flags);
        if (protections & posix.PROT.EXEC > 0) {
            // Executable segments are instrumented before being made read-only.
            log.info("Patching executable segment", .{});
            try patcher.patchRegion(ptr);
        }
        // Drop the temporary WRITE access in favor of the segment's real flags.
        try posix.mprotect(ptr, protections);
    }
    log.debug("loadElf returning base: 0x{x}", .{@intFromPtr(base.ptr)});
    return @intFromPtr(base.ptr);
}
|
||||||
|
|
||||||
|
/// Converts ELF program header protection flags (`PF_R`/`PF_W`/`PF_X`) to
/// the corresponding mmap protection flags (`PROT.READ`/`WRITE`/`EXEC`).
/// Unset flags contribute nothing; zero input yields `PROT.NONE`.
fn elfToMmapProt(elf_prot: u64) u32 {
    const translations = [_]struct { elf_flag: u64, mmap_prot: u32 }{
        .{ .elf_flag = elf.PF_R, .mmap_prot = posix.PROT.READ },
        .{ .elf_flag = elf.PF_W, .mmap_prot = posix.PROT.WRITE },
        .{ .elf_flag = elf.PF_X, .mmap_prot = posix.PROT.EXEC },
    };
    var result: u32 = posix.PROT.NONE;
    for (translations) |translation| {
        if (elf_prot & translation.elf_flag != 0) result |= translation.mmap_prot;
    }
    return result;
}
|
||||||
|
|
||||||
|
/// Opens the file by either opening via a (absolute or relative) path or searching through `PATH`
/// for a file with the name.
/// Returns `error.FileNotFound` if no candidate can be opened.
// TODO: support paths starting with ~
fn lookupFile(path_or_name: []const u8) !std.fs.File {
    // If filename contains a slash ("/"), then it is interpreted as a pathname.
    if (std.mem.indexOfScalarPos(u8, path_or_name, 0, '/')) |_| {
        const fd = try posix.open(path_or_name, .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
        return .{ .handle = fd };
    }

    // If it has no slash we need to look it up in PATH.
    if (posix.getenvZ("PATH")) |env_path| {
        var paths = std.mem.tokenizeScalar(u8, env_path, ':');
        while (paths.next()) |p| {
            // Unreadable PATH entries are skipped rather than treated as errors.
            var dir = std.fs.openDirAbsolute(p, .{}) catch continue;
            defer dir.close();
            const fd = posix.openat(dir.fd, path_or_name, .{
                .ACCMODE = .RDONLY,
                .CLOEXEC = true,
            }, 0) catch continue;
            return .{ .handle = fd };
        }
    }

    return error.FileNotFound;
}
|
||||||
|
|
||||||
|
/// This function performs the final jump into the loaded program (amd64).
/// `sp` must point at the prepared stack (the word holding the new argc,
/// as set up by `main`); it becomes the program's initial %rsp.
// TODO: support more architectures
fn trampoline(entry: usize, sp: [*]usize) noreturn {
    // Install the new stack pointer, then jump — never returns.
    asm volatile (
        \\ mov %[sp], %%rsp
        \\ jmp *%[entry]
        : // No outputs
        : [entry] "r" (entry),
          [sp] "r" (sp),
        : .{ .rsp = true, .memory = true });
    unreachable;
}
|
||||||
|
|
||||||
|
// Reference the sub-modules so `zig test` also runs their test blocks.
test {
    _ = @import("AddressAllocator.zig");
    _ = @import("Range.zig");
    _ = @import("PatchLocationIterator.zig");
}
|
||||||
54990
src/vendor/Zydis.c
vendored
Normal file
54990
src/vendor/Zydis.c
vendored
Normal file
File diff suppressed because one or more lines are too long
12113
src/vendor/Zydis.h
vendored
Normal file
12113
src/vendor/Zydis.h
vendored
Normal file
File diff suppressed because it is too large
Load Diff
4
src/vendor/zydis.zig
vendored
Normal file
4
src/vendor/zydis.zig
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
// Separate module to always compile it with a release mode.
// Exposes the C Zydis disassembler library via translate-c.
pub const zydis = @cImport({
    @cInclude("Zydis.h");
});
|
||||||
Reference in New Issue
Block a user