🐦 iBSS iBEC LLB TXM

update

update
This commit is contained in:
Lakr
2026-03-11 02:57:28 +08:00
parent e189b80cf7
commit 08eb9d260f
69 changed files with 11358 additions and 3 deletions

15
.gitmodules vendored
View File

@@ -1,3 +1,18 @@
[submodule "scripts/resources"]
path = scripts/resources
url = https://github.com/Lakr233/vphone-cli-storage.git
[submodule "vendor/libcapstone-spm"]
path = vendor/libcapstone-spm
url = https://github.com/Lakr233/libcapstone-spm.git
[submodule "vendor/libimg4-spm"]
path = vendor/libimg4-spm
url = https://github.com/Lakr233/libimg4-spm.git
[submodule "vendor/MachOKit"]
path = vendor/MachOKit
url = https://github.com/p-x9/MachOKit.git
[submodule "vendor/Dynamic"]
path = vendor/Dynamic
url = https://github.com/mhdhejazi/Dynamic.git
[submodule "vendor/swift-argument-parser"]
path = vendor/swift-argument-parser
url = https://github.com/apple/swift-argument-parser.git

View File

@@ -9,15 +9,28 @@ let package = Package(
],
products: [],
dependencies: [
.package(url: "https://github.com/apple/swift-argument-parser", from: "1.3.1"),
.package(url: "https://github.com/mhdhejazi/Dynamic", from: "1.2.0"),
.package(path: "vendor/swift-argument-parser"),
.package(path: "vendor/Dynamic"),
.package(path: "vendor/libcapstone-spm"),
.package(path: "vendor/libimg4-spm"),
.package(path: "vendor/MachOKit"),
],
targets: [
.target(
name: "FirmwarePatcher",
dependencies: [
.product(name: "Capstone", package: "libcapstone-spm"),
.product(name: "Img4tool", package: "libimg4-spm"),
.product(name: "MachOKit", package: "MachOKit"),
],
path: "sources/FirmwarePatcher"
),
.executableTarget(
name: "vphone-cli",
dependencies: [
.product(name: "ArgumentParser", package: "swift-argument-parser"),
.product(name: "Dynamic", package: "Dynamic"),
"FirmwarePatcher",
],
path: "sources/vphone-cli",
linkerSettings: [
@@ -28,5 +41,10 @@ let package = Package(
.linkedFramework("AVFoundation"),
]
),
.testTarget(
name: "FirmwarePatcherTests",
dependencies: ["FirmwarePatcher"],
path: "tests/FirmwarePatcherTests"
),
]
)

View File

@@ -0,0 +1,240 @@
#!/usr/bin/env python3
"""Generate patch reference JSON for each firmware component.
Runs each Python patcher in dry-run mode (find patches but don't apply)
and exports the patch sites with offsets and bytes as JSON.
Usage:
source .venv/bin/activate
python3 scripts/export_patch_reference.py ipsws/patch_refactor_input
"""
import json
import os
import struct
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
from capstone import Cs, CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN
# Module-wide Capstone handle: ARM64, little-endian.  Detail mode populates
# per-instruction operand info (mnemonic / op_str) used by the exporters below.
_cs = Cs(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN)
_cs.detail = True
def disasm_one(data, off):
    """Decode the single 4-byte instruction at byte offset `off`.

    Returns the Capstone instruction object, or None if the word does not
    decode.  The offset doubles as the instruction's address.
    """
    word = bytes(data[off:off + 4])
    for insn in _cs.disasm(word, off):
        return insn
    return None
def disasm_bytes(b, addr=0):
    """Render the first instruction in `b` as 'mnemonic operands', or '???'."""
    decoded = list(_cs.disasm(bytes(b), addr))
    if not decoded:
        return "???"
    first = decoded[0]
    return f"{first.mnemonic} {first.op_str}"
def patches_to_json(patches, component):
    """Serialize (offset, patch_bytes, description) tuples into JSON-ready dicts.

    `component` is stamped on every record so merged outputs stay traceable.
    """
    return [
        {
            "file_offset": offset,
            "patch_bytes": patch.hex(),
            "patch_size": len(patch),
            "description": description,
            "component": component,
        }
        for offset, patch, description in patches
    ]
def load_firmware(path):
    """Read a firmware image; if it parses as IM4P, return its payload.

    Compressed IM4P payloads are decompressed first.  Any failure (pyimg4
    missing, not an IM4P container, bad payload) falls back to the raw bytes.
    Always returns a mutable bytearray.
    """
    with open(path, "rb") as fh:
        blob = fh.read()
    try:
        from pyimg4 import IM4P
    except Exception:
        return bytearray(blob)
    try:
        parsed = IM4P(blob)
        if parsed.payload.compression:
            parsed.payload.decompress()
        return bytearray(parsed.payload.data)
    except Exception:
        return bytearray(blob)
def export_avpbooter(base_dir, out_dir):
    """Export the AVPBooter DGST-bypass patch reference as JSON.

    Locates the DGST marker constant in the raw AVPBooter binary, scans the
    following instructions for the `mov x0, ... ; ret` epilogue, and records a
    `mov x0, #0` patch at the mov site.  Writes <out_dir>/avpbooter.json.
    """
    import glob
    paths = glob.glob(os.path.join(base_dir, "AVPBooter*.bin"))
    if not paths:
        print(" [!] AVPBooter not found, skipping")
        return
    path = paths[0]
    # BUG FIX: the original `bytearray(open(path, "rb").read())` leaked the
    # file handle; use a context manager so it is always closed.
    with open(path, "rb") as fh:
        data = bytearray(fh.read())
    print(f" AVPBooter: {path} ({len(data)} bytes)")
    # Inline the AVPBooter patcher logic (from fw_patch.py)
    from keystone import Ks, KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN
    _ks = Ks(KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN)

    def asm(s):
        # Assemble one ARM64 instruction to its little-endian byte encoding.
        enc, _ = _ks.asm(s)
        return bytes(enc)

    patches = []
    # NOTE(review): packing 0x44475354 ("DGST" as big-endian ASCII) with "<I"
    # yields b"TSGD" -- assumes the marker is stored little-endian in the
    # binary.  Confirm against an actual AVPBooter image.
    DGST = struct.pack("<I", 0x44475354)
    off = data.find(DGST)
    if off < 0:
        print(" [!] AVPBooter: DGST marker not found")
        return
    # Disassemble up to 0x200 bytes (max 50 instructions) after the marker and
    # patch the `mov x0, ...` immediately preceding the first `ret`.
    insns = list(_cs.disasm(bytes(data[off:off + 0x200]), off, 50))
    for i, ins in enumerate(insns):
        if ins.mnemonic == "ret":
            prev = insns[i - 1] if i > 0 else None
            if prev and prev.mnemonic == "mov" and "x0" in prev.op_str:
                patches.append((prev.address, asm("mov x0, #0"),
                                "AVPBooter DGST bypass: mov x0, #0"))
            break
    records = patches_to_json(patches, "avpbooter")
    out_path = os.path.join(out_dir, "avpbooter.json")
    with open(out_path, "w") as f:
        json.dump(records, f, indent=2)
    print(f"{out_path} ({len(records)} patches)")
def export_iboot(base_dir, out_dir):
    """Export iBSS/iBEC/LLB patch references, one JSON file per component."""
    from patchers.iboot import IBootPatcher
    component_paths = (
        ("ibss", "Firmware/dfu/iBSS.vresearch101.RELEASE.im4p"),
        ("ibec", "Firmware/dfu/iBEC.vresearch101.RELEASE.im4p"),
        ("llb", "Firmware/all_flash/LLB.vresearch101.RELEASE.im4p"),
    )
    for mode, rel_path in component_paths:
        im4p_path = os.path.join(base_dir, rel_path)
        if not os.path.exists(im4p_path):
            print(f" [!] {mode}: {rel_path} not found, skipping")
            continue
        payload = load_firmware(im4p_path)
        print(f" {mode}: {rel_path} ({len(payload)} bytes)")
        # Dry-run: collect patch sites without applying them.
        patcher = IBootPatcher(payload, mode=mode, verbose=True)
        patcher.find_all()
        records = patches_to_json(patcher.patches, mode)
        dest = os.path.join(out_dir, f"{mode}.json")
        with open(dest, "w") as fh:
            json.dump(records, fh, indent=2)
        print(f"{dest} ({len(records)} patches)")
def export_txm(base_dir, out_dir):
    """Export the TXM patch reference to <out_dir>/txm.json."""
    from patchers.txm import TXMPatcher as TXMBasePatcher
    im4p_path = os.path.join(base_dir, "Firmware/txm.iphoneos.research.im4p")
    if not os.path.exists(im4p_path):
        print(" [!] TXM not found, skipping")
        return
    payload = load_firmware(im4p_path)
    print(f" TXM: ({len(payload)} bytes)")
    # Dry-run: find patch sites without mutating the payload on disk.
    txm = TXMBasePatcher(payload, verbose=True)
    txm.find_all()
    records = patches_to_json(txm.patches, "txm")
    dest = os.path.join(out_dir, "txm.json")
    with open(dest, "w") as fh:
        json.dump(records, fh, indent=2)
    print(f"{dest} ({len(records)} patches)")
def export_kernel(base_dir, out_dir):
    """Export the kernelcache patch reference to <out_dir>/kernelcache.json."""
    from patchers.kernel import KernelPatcher
    kc_path = os.path.join(base_dir, "kernelcache.research.vphone600")
    if not os.path.exists(kc_path):
        print(" [!] kernelcache not found, skipping")
        return
    payload = load_firmware(kc_path)
    print(f" kernelcache: ({len(payload)} bytes)")
    # Dry-run: find patch sites without applying them.
    kernel = KernelPatcher(payload, verbose=True)
    kernel.find_all()
    records = patches_to_json(kernel.patches, "kernelcache")
    dest = os.path.join(out_dir, "kernelcache.json")
    with open(dest, "w") as fh:
        json.dump(records, fh, indent=2)
    print(f"{dest} ({len(records)} patches)")
def export_dtree(base_dir, out_dir):
    """Export DeviceTree patch reference.

    Unlike the other exporters, DeviceTree patches carry the original bytes
    alongside the replacement, so records include an "original_bytes" field.
    Note: main() does not currently call this function (see the comment there).
    """
    import dtree
    path = os.path.join(base_dir, "Firmware/all_flash/DeviceTree.vphone600ap.im4p")
    if not os.path.exists(path):
        print(" [!] DeviceTree not found, skipping")
        return
    data = load_firmware(path)
    print(f" DeviceTree: ({len(data)} bytes)")
    # dtree.patch_device_tree_payload returns list of patches
    # NOTE(review): the comment above names patch_device_tree_payload but the
    # call below is dtree.find_patches -- confirm which API dtree.py exposes.
    patches = dtree.find_patches(data)
    records = []
    # Each patch is assumed to be (offset, old_bytes, new_bytes, description);
    # old/new may already be hex strings, hence the isinstance guards.
    for off, old_bytes, new_bytes, desc in patches:
        records.append({
            "file_offset": off,
            "original_bytes": old_bytes.hex() if isinstance(old_bytes, (bytes, bytearray)) else old_bytes,
            "patch_bytes": new_bytes.hex() if isinstance(new_bytes, (bytes, bytearray)) else new_bytes,
            "patch_size": len(new_bytes) if isinstance(new_bytes, (bytes, bytearray)) else 0,
            "description": desc,
            "component": "devicetree",
        })
    out_path = os.path.join(out_dir, "devicetree.json")
    with open(out_path, "w") as f:
        json.dump(records, f, indent=2)
    print(f"{out_path} ({len(records)} patches)")
def main():
    """CLI entry point: export patch references for every firmware component.

    Usage: export_patch_reference.py <firmware_dir>.  Output JSON files are
    written to <firmware_dir>/reference_patches/.
    """
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <firmware_dir>")
        sys.exit(1)
    base_dir = os.path.abspath(sys.argv[1])
    out_dir = os.path.join(base_dir, "reference_patches")
    os.makedirs(out_dir, exist_ok=True)
    print(f"=== Exporting patch references from {base_dir} ===\n")
    # Change to scripts dir so imports work.
    # BUG FIX: os.path.dirname(__file__) is "" when the script is invoked by
    # bare filename from its own directory, and os.chdir("") raises
    # FileNotFoundError.  Resolve to an absolute path first.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    export_avpbooter(base_dir, out_dir)
    print()
    export_iboot(base_dir, out_dir)
    print()
    export_txm(base_dir, out_dir)
    print()
    export_kernel(base_dir, out_dir)
    print()
    # DeviceTree needs special handling - the dtree.py may not have find_patches
    # We'll handle it separately
    print(f"\n=== Done. References saved to {out_dir}/ ===")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,150 @@
#!/usr/bin/env python3
"""Generate patch reference JSON for ALL variants (regular + dev + jb).
Usage:
source .venv/bin/activate
python3 scripts/export_patch_reference_all.py ipsws/patch_refactor_input
"""
import json
import os
import struct
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
from capstone import Cs, CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN
# Module-wide Capstone handle: ARM64, little-endian.  Detail mode populates
# per-instruction operand info used by the exporters below.
_cs = Cs(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN)
_cs.detail = True
def disasm_one(data, off):
    """Decode the 4-byte instruction at offset `off`, or None if undecodable."""
    decoded = list(_cs.disasm(bytes(data[off:off + 4]), off))
    if not decoded:
        return None
    return decoded[0]
def patches_to_json(patches, component):
    """Serialize (offset, patch_bytes, description) tuples into JSON-ready dicts."""
    return [
        {
            "file_offset": offset,
            "patch_bytes": patch.hex(),
            "patch_size": len(patch),
            "description": description,
            "component": component,
        }
        for offset, patch, description in patches
    ]
def load_firmware(path):
    """Read a firmware image; return its (decompressed) IM4P payload or raw bytes.

    Any failure while treating the file as IM4P falls back to the raw file
    contents.  Always returns a mutable bytearray.
    """
    with open(path, "rb") as fh:
        blob = fh.read()
    try:
        from pyimg4 import IM4P
    except Exception:
        return bytearray(blob)
    try:
        parsed = IM4P(blob)
        if parsed.payload.compression:
            parsed.payload.decompress()
        return bytearray(parsed.payload.data)
    except Exception:
        return bytearray(blob)
def export_txm_dev(base_dir, out_dir):
    """Export the TXM dev patch reference (base + dev patch sets) as one JSON."""
    from patchers.txm import TXMPatcher as TXMBasePatcher
    from patchers.txm_dev import TXMPatcher as TXMDevPatcher
    im4p_path = os.path.join(base_dir, "Firmware/txm.iphoneos.research.im4p")
    if not os.path.exists(im4p_path):
        print(" [!] TXM not found, skipping txm_dev")
        return
    payload = load_firmware(im4p_path)
    print(f" TXM dev: ({len(payload)} bytes)")
    # Base TXM patch set.
    base_patcher = TXMBasePatcher(payload, verbose=True)
    base_patcher.find_all()
    base_records = patches_to_json(base_patcher.patches, "txm_dev_base")
    # Dev patch set is found on a fresh copy so base patches are not applied first.
    dev_patcher = TXMDevPatcher(bytearray(payload), verbose=True)
    dev_patcher.find_all()
    dev_records = patches_to_json(dev_patcher.patches, "txm_dev")
    dest = os.path.join(out_dir, "txm_dev.json")
    with open(dest, "w") as fh:
        json.dump({"base": base_records, "dev": dev_records}, fh, indent=2)
    print(f"{dest} ({len(base_records)} base + {len(dev_records)} dev patches)")
def export_iboot_jb(base_dir, out_dir):
    """Export the iBSS jailbreak-only patch reference to <out_dir>/ibss_jb.json."""
    from patchers.iboot_jb import IBootJBPatcher
    ibss_path = os.path.join(base_dir, "Firmware/dfu/iBSS.vresearch101.RELEASE.im4p")
    if not os.path.exists(ibss_path):
        print(" [!] iBSS not found, skipping iboot_jb")
        return
    payload = load_firmware(ibss_path)
    print(f" iBSS JB: ({len(payload)} bytes)")
    jb = IBootJBPatcher(payload, mode="ibss", verbose=True)
    # Only find JB patches (not base): clear any pre-populated list and run
    # just the nonce patch finder.
    jb.patches = []
    jb.patch_skip_generate_nonce()
    records = patches_to_json(jb.patches, "ibss_jb")
    dest = os.path.join(out_dir, "ibss_jb.json")
    with open(dest, "w") as fh:
        json.dump(records, fh, indent=2)
    print(f"{dest} ({len(records)} patches)")
def export_kernel_jb(base_dir, out_dir):
    """Export the kernel jailbreak patch reference to <out_dir>/kernelcache_jb.json."""
    from patchers.kernel_jb import KernelJBPatcher
    kc_path = os.path.join(base_dir, "kernelcache.research.vphone600")
    if not os.path.exists(kc_path):
        print(" [!] kernelcache not found, skipping kernel_jb")
        return
    payload = load_firmware(kc_path)
    print(f" kernelcache JB: ({len(payload)} bytes)")
    jb = KernelJBPatcher(payload, verbose=True)
    # Unlike the other exporters, this patcher's find_all() returns the list.
    found = jb.find_all()
    records = patches_to_json(found, "kernelcache_jb")
    dest = os.path.join(out_dir, "kernelcache_jb.json")
    with open(dest, "w") as fh:
        json.dump(records, fh, indent=2)
    print(f"{dest} ({len(records)} patches)")
def main():
    """CLI entry point: export dev/jb patch reference variants.

    Usage: export_patch_reference_all.py <firmware_dir>.  Output JSON files
    are written to <firmware_dir>/reference_patches/.
    """
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <firmware_dir>")
        sys.exit(1)
    base_dir = os.path.abspath(sys.argv[1])
    out_dir = os.path.join(base_dir, "reference_patches")
    os.makedirs(out_dir, exist_ok=True)
    print(f"=== Exporting dev/jb patch references from {base_dir} ===\n")
    # BUG FIX: os.path.dirname(__file__) is "" when the script is invoked by
    # bare filename from its own directory, and os.chdir("") raises
    # FileNotFoundError.  Resolve to an absolute path first so the patchers.*
    # imports keep working.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    export_txm_dev(base_dir, out_dir)
    print()
    export_iboot_jb(base_dir, out_dir)
    print()
    export_kernel_jb(base_dir, out_dir)
    print(f"\n=== Done. References saved to {out_dir}/ ===")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,164 @@
// ARM64Constants.swift Pre-encoded ARM64 instruction constants.
//
// Every constant was generated by keystone-engine and verified
// via capstone round-trip disassembly. Do NOT edit raw bytes
// without updating the corresponding test case.
import Foundation
// MARK: - ARM64 Instruction Constants
public enum ARM64 {
    // MARK: Fixed Instructions
    // Each Data constant is the 4-byte little-endian encoding of one
    // instruction (see encodeU32 at the bottom of this enum).

    /// NOP — no operation
    public static let nop = encodeU32(0xD503_201F)
    /// RET — return from subroutine
    public static let ret = encodeU32(0xD65F_03C0)
    /// RETAA — return with pointer authentication (A key)
    public static let retaa = encodeU32(0xD65F_0BFF)
    /// RETAB — return with pointer authentication (B key)
    public static let retab = encodeU32(0xD65F_0FFF)
    /// PACIBSP — sign LR with B key using SP (hint #27)
    public static let pacibsp = encodeU32(0xD503_237F)

    // MARK: MOV Variants

    /// MOV X0, #0 (MOVZ X0, #0)
    public static let movX0_0 = encodeU32(0xD280_0000)
    /// MOV X0, #1 (MOVZ X0, #1)
    public static let movX0_1 = encodeU32(0xD280_0020)
    /// MOV W0, #0 (MOVZ W0, #0)
    public static let movW0_0 = encodeU32(0x5280_0000)
    /// MOV W0, #1 (MOVZ W0, #1)
    public static let movW0_1 = encodeU32(0x5280_0020)
    /// MOV X0, X20 (ORR X0, XZR, X20)
    public static let movX0X20 = encodeU32(0xAA14_03E0)
    /// MOV W0, #0xA1 (MOVZ W0, #0xA1)
    public static let movW0_0xA1 = encodeU32(0x5280_1420)

    // MARK: Compare

    /// CMP W0, W0 (SUBS WZR, W0, W0)
    public static let cmpW0W0 = encodeU32(0x6B00_001F)
    /// CMP X0, X0 (SUBS XZR, X0, X0)
    public static let cmpX0X0 = encodeU32(0xEB00_001F)

    // MARK: Memory

    /// STRB W0, [X20, #0x30]
    public static let strbW0X20_30 = encodeU32(0x3900_C280)

    // MARK: JB Constants

    /// CBZ X2, #8 (branch if X2 == 0, skip 2 instructions)
    public static let cbzX2_8 = encodeU32(0xB400_0042)
    /// STR X0, [X2]
    public static let strX0X2 = encodeU32(0xF900_0040)
    /// CMP XZR, XZR (SUBS XZR, XZR, XZR — always sets Z flag)
    public static let cmpXzrXzr = encodeU32(0xEB1F_03FF)
    /// MOV X8, XZR (ORR X8, XZR, XZR)
    public static let movX8Xzr = encodeU32(0xAA1F_03E8)

    // MARK: Common Prologue/Epilogue Patterns (verified via keystone)

    /// STP X29, X30, [SP, #-0x10]! (push frame)
    static let stpFP_LR_pre: UInt32 = 0xA9BF_7BFD
    /// MOV X29, SP (set frame pointer)
    static let movFP_SP: UInt32 = 0x9100_03FD

    // MARK: IOUC MACF Aggregator Shape

    /// LDR X10, [X10, #0x9E8] (mac_policy_list slot load)
    static let ldr_x10_x10_0x9e8: UInt32 = 0xF944_F54A
    /// BLRAA X10, SP (authenticated indirect call)
    static let blraa_x10: UInt32 = 0xD73F_0940
    /// BLRAB X10, SP
    static let blrab_x10: UInt32 = 0xD73F_0D40
    /// BLR X10
    static let blr_x10: UInt32 = 0xD63F_0140

    // MARK: C23 Cave Instructions (verified via keystone)
    //
    // These are the fixed instructions used in the faithful upstream C23
    // shellcode cave (vnode getattr uid/gid/P_SUGID fixup).
    // Position-dependent BL/B instructions are NOT included here — they are
    // encoded at build time by ARM64Encoder.
    static let c23_cbzX3_0xA8: UInt32 = 0xB400_0543 // cbz x3, #+0xa8
    static let c23_subSP_0x400: UInt32 = 0xD110_03FF // sub sp, sp, #0x400
    static let c23_stpFP_LR: UInt32 = 0xA900_7BFD // stp x29, x30, [sp]
    static let c23_stpX0X1_0x10: UInt32 = 0xA901_07E0 // stp x0, x1, [sp, #0x10]
    static let c23_stpX2X3_0x20: UInt32 = 0xA902_0FE2 // stp x2, x3, [sp, #0x20]
    static let c23_stpX4X5_0x30: UInt32 = 0xA903_17E4 // stp x4, x5, [sp, #0x30]
    static let c23_stpX6X7_0x40: UInt32 = 0xA904_1FE6 // stp x6, x7, [sp, #0x40]
    static let c23_movX2_X0: UInt32 = 0xAA00_03E2 // mov x2, x0
    static let c23_ldrX0_sp_0x28: UInt32 = 0xF940_17E0 // ldr x0, [sp, #0x28]
    static let c23_addX1_sp_0x80: UInt32 = 0x9102_03E1 // add x1, sp, #0x80
    static let c23_movzW8_0x380: UInt32 = 0x5280_7008 // movz w8, #0x380
    static let c23_stpXZR_X8: UInt32 = 0xA900_203F // stp xzr, x8, [x1]
    static let c23_stpXZR_XZR_0x10: UInt32 = 0xA901_7C3F // stp xzr, xzr, [x1, #0x10]
    static let c23_cbnzX0_0x4c: UInt32 = 0xB500_0260 // cbnz x0, #+0x4c
    static let c23_movW2_0: UInt32 = 0x5280_0002 // mov w2, #0
    static let c23_ldrW8_sp_0xcc: UInt32 = 0xB940_CFE8 // ldr w8, [sp, #0xcc]
    static let c23_tbzW8_11_0x14: UInt32 = 0x3658_00A8 // tbz w8, #0xb, #+0x14
    static let c23_ldrW8_sp_0xc4: UInt32 = 0xB940_C7E8 // ldr w8, [sp, #0xc4]
    static let c23_ldrX0_sp_0x18: UInt32 = 0xF940_0FE0 // ldr x0, [sp, #0x18]
    static let c23_strW8_x0_0x18: UInt32 = 0xB900_1808 // str w8, [x0, #0x18]
    static let c23_movW2_1: UInt32 = 0x5280_0022 // mov w2, #1
    static let c23_tbzW8_10_0x14: UInt32 = 0x3650_00A8 // tbz w8, #0xa, #+0x14
    static let c23_ldrW8_sp_0xc8: UInt32 = 0xB940_CBE8 // ldr w8, [sp, #0xc8]
    static let c23_strW8_x0_0x28: UInt32 = 0xB900_2808 // str w8, [x0, #0x28]
    static let c23_cbzW2_0x14: UInt32 = 0x3400_00A2 // cbz w2, #+0x14
    static let c23_ldrX0_sp_0x20: UInt32 = 0xF940_13E0 // ldr x0, [sp, #0x20]
    static let c23_ldrW8_x0_0x454: UInt32 = 0xB944_5408 // ldr w8, [x0, #0x454]
    static let c23_orrW8_0x100: UInt32 = 0x3218_0108 // orr w8, w8, #0x100
    static let c23_strW8_x0_0x454: UInt32 = 0xB904_5408 // str w8, [x0, #0x454]
    static let c23_ldpX0X1_0x10: UInt32 = 0xA941_07E0 // ldp x0, x1, [sp, #0x10]
    static let c23_ldpX2X3_0x20: UInt32 = 0xA942_0FE2 // ldp x2, x3, [sp, #0x20]
    static let c23_ldpX4X5_0x30: UInt32 = 0xA943_17E4 // ldp x4, x5, [sp, #0x30]
    static let c23_ldpX6X7_0x40: UInt32 = 0xA944_1FE6 // ldp x6, x7, [sp, #0x40]
    static let c23_ldpFP_LR: UInt32 = 0xA940_7BFD // ldp x29, x30, [sp]
    static let c23_addSP_0x400: UInt32 = 0x9110_03FF // add sp, sp, #0x400

    // MARK: vfs_context_current Shape (verified via keystone)

    /// mrs x0, tpidr_el1
    static let mrs_x0_tpidr_el1: UInt32 = 0xD538_D080
    /// ldr x1, [x0, #0x3e0]
    static let ldr_x1_x0_0x3e0: UInt32 = 0xF941_F001

    // MARK: UInt32 Values (for pattern matching)
    // Raw UInt32 twins of the Data constants above, for use where callers
    // compare against a decoded instruction word rather than raw bytes.

    public static let nopU32: UInt32 = 0xD503_201F
    public static let retU32: UInt32 = 0xD65F_03C0
    public static let retaaU32: UInt32 = 0xD65F_0BFF
    public static let retabU32: UInt32 = 0xD65F_0FFF
    public static let pacibspU32: UInt32 = 0xD503_237F

    /// Set of instruction uint32 values that indicate function boundaries.
    public static let funcBoundaryU32s: Set<UInt32> = [
        retU32, retaaU32, retabU32, pacibspU32,
    ]

    // MARK: - Helpers

    /// Encode a 32-bit instruction word as 4 little-endian bytes.
    @inlinable
    static func encodeU32(_ value: UInt32) -> Data {
        withUnsafeBytes(of: value.littleEndian) { Data($0) }
    }
}

View File

@@ -0,0 +1,57 @@
// ARM64Disassembler.swift Capstone wrapper for ARM64 disassembly.
import Capstone
import Foundation
/// Thin wrapper around a Capstone AArch64 little-endian disassembler.
public final class ARM64Disassembler: Sendable {
    /// Shared singleton instance with detail mode enabled.
    public static let shared: ARM64Disassembler = .init()

    private let cs: Disassembler

    public init() {
        // CS_ARCH_AARCH64 and CS_MODE_LITTLE_ENDIAN are the correct constants
        cs = try! Disassembler(arch: CS_ARCH_AARCH64, mode: CS_MODE_LITTLE_ENDIAN)
        cs.detail = true
        cs.skipData = true
    }

    /// Disassemble instructions from data starting at the given virtual address.
    ///
    /// - Parameters:
    ///   - data: Raw instruction bytes.
    ///   - address: Virtual address of the first byte.
    ///   - count: Maximum number of instructions to disassemble (0 = all).
    /// - Returns: Array of disassembled instructions.
    public func disassemble(_ data: Data, at address: UInt64 = 0, count: Int = 0) -> [Instruction] {
        cs.disassemble(code: data, address: address, count: count)
    }

    /// Disassemble a single 4-byte instruction at the given address.
    public func disassembleOne(_ data: Data, at address: UInt64 = 0) -> Instruction? {
        cs.disassemble(code: data, address: address, count: 1).first
    }

    /// Disassemble a single instruction from a buffer at a file offset.
    /// Returns nil when fewer than 4 bytes remain at `offset`.
    public func disassembleOne(in buffer: Data, at offset: Int, address: UInt64? = nil) -> Instruction? {
        let end = offset + 4
        guard offset >= 0, end <= buffer.count else { return nil }
        return disassembleOne(Data(buffer[offset ..< end]), at: address ?? UInt64(offset))
    }

    /// Disassemble `count` instructions starting at file offset.
    /// Returns [] when the requested span runs past the buffer.
    public func disassemble(in buffer: Data, at offset: Int, count: Int, address: UInt64? = nil) -> [Instruction] {
        let end = offset + count * 4
        guard offset >= 0, end <= buffer.count else { return [] }
        return disassemble(Data(buffer[offset ..< end]), at: address ?? UInt64(offset), count: count)
    }

    /// Return the canonical name string for an AArch64 register ID (e.g. "x0", "w1", "wzr").
    public func registerName(_ regID: UInt32) -> String? {
        cs.registerName(regID)
    }
}

View File

@@ -0,0 +1,98 @@
// ARM64Encoder.swift PC-relative instruction encoding for ARM64.
//
// Replaces keystone-engine _asm_at() for branch/ADRP/ADD encoding.
// Each encoder produces a 4-byte little-endian Data value.
import Foundation
/// PC-relative instruction encoding for ARM64.
/// Replaces keystone-engine `_asm_at()` for branch/ADRP/ADD encoding.
/// Each encoder produces a 4-byte little-endian Data value, or nil when the
/// operands cannot be encoded.
public enum ARM64Encoder {
    // MARK: - Branch Encoding

    /// Shared imm26 branch encoder: opcode | (displacement / 4) in [25:0].
    /// Returns nil when the displacement is misaligned or outside ±128 MB.
    private static func branchWord(opcode: UInt32, from pc: Int, to target: Int) -> Data? {
        let displacement = target - pc
        guard displacement % 4 == 0 else { return nil }
        let words = displacement >> 2
        guard words >= -(1 << 25), words < (1 << 25) else { return nil }
        let field = UInt32(bitPattern: Int32(words)) & 0x03FF_FFFF
        return ARM64.encodeU32(opcode | field)
    }

    /// Encode unconditional B (branch) instruction.
    ///
    /// Format: `[31:26] = 0b000101`, `[25:0] = signed offset / 4`.  Range: ±128 MB.
    public static func encodeB(from pc: Int, to target: Int) -> Data? {
        branchWord(opcode: 0x1400_0000, from: pc, to: target)
    }

    /// Encode BL (branch with link) instruction.
    ///
    /// Format: `[31:26] = 0b100101`, `[25:0] = signed offset / 4`.  Range: ±128 MB.
    public static func encodeBL(from pc: Int, to target: Int) -> Data? {
        branchWord(opcode: 0x9400_0000, from: pc, to: target)
    }

    // MARK: - ADRP / ADD Encoding

    /// Encode ADRP instruction (loads a 4 KB-aligned page address relative to PC).
    ///
    /// Format: `[31] = 1 (op)`, `[30:29] = immlo`, `[28:24] = 0b10000`,
    /// `[23:5] = immhi`, `[4:0] = Rd`.
    public static func encodeADRP(rd: UInt32, pc: UInt64, target: UInt64) -> Data? {
        // Work in whole pages: ADRP computes target_page - pc_page.
        let pageDelta = Int64(target & ~0xFFF) - Int64(pc & ~0xFFF)
        let pages = pageDelta >> 12
        guard pages >= -(1 << 20), pages < (1 << 20) else { return nil }
        let imm21 = UInt32(bitPattern: Int32(pages)) & 0x1F_FFFF
        let word: UInt32 = (1 << 31)
            | ((imm21 & 0x3) << 29)
            | (0b10000 << 24)
            | (((imm21 >> 2) & 0x7FFFF) << 5)
            | (rd & 0x1F)
        return ARM64.encodeU32(word)
    }

    /// Encode ADD Xd, Xn, #imm12 (64-bit, no shift).
    ///
    /// Format: `[31] = 1 (sf)`, `[30:29] = 00`, `[28:24] = 0b10001`,
    /// `[23:22] = 00 (shift)`, `[21:10] = imm12`, `[9:5] = Rn`, `[4:0] = Rd`.
    public static func encodeAddImm12(rd: UInt32, rn: UInt32, imm12: UInt32) -> Data? {
        guard imm12 <= 0xFFF else { return nil }
        let word: UInt32 = (1 << 31) | (0b0010001 << 24) | (imm12 << 10) | ((rn & 0x1F) << 5) | (rd & 0x1F)
        return ARM64.encodeU32(word)
    }

    /// Encode MOVZ Wd, #imm16 (32-bit).  `shift` must be 0 or 16.
    ///
    /// Format: `[31] = 0 (sf)`, `[30:29] = 10`, `[28:23] = 100101`,
    /// `[22:21] = hw`, `[20:5] = imm16`, `[4:0] = Rd`.
    public static func encodeMovzW(rd: UInt32, imm16: UInt16, shift: UInt32 = 0) -> Data? {
        let halfword = shift / 16
        guard halfword <= 1 else { return nil }
        let word: UInt32 = (0b0_1010_0101 << 23) | (halfword << 21) | (UInt32(imm16) << 5) | (rd & 0x1F)
        return ARM64.encodeU32(word)
    }

    /// Encode MOVZ Xd, #imm16 (64-bit).  `shift` must be 0, 16, 32, or 48.
    public static func encodeMovzX(rd: UInt32, imm16: UInt16, shift: UInt32 = 0) -> Data? {
        let halfword = shift / 16
        guard halfword <= 3 else { return nil }
        let word: UInt32 = (0b1_1010_0101 << 23) | (halfword << 21) | (UInt32(imm16) << 5) | (rd & 0x1F)
        return ARM64.encodeU32(word)
    }

    // MARK: - Decode Helpers

    /// Decode a B or BL target address from an instruction word at `pc`.
    /// Returns nil for any other instruction.
    public static func decodeBranchTarget(insn: UInt64, pc: UInt64) -> UInt64? {
        decodeBranchTarget(insn: UInt32(truncatingIfNeeded: insn), pc: pc)
    }

    /// Decode a B or BL target address from an instruction at `pc`.
    public static func decodeBranchTarget(insn: UInt32, pc: UInt64) -> UInt64? {
        switch insn >> 26 {
        case 0b000101, 0b100101:
            let imm26 = insn & 0x03FF_FFFF
            // Sign-extend the 26-bit field, then scale by instruction size.
            let displacement = Int64(Int32(bitPattern: imm26 << 6) >> 6) * 4
            return UInt64(Int64(pc) + displacement)
        default:
            return nil
        }
    }
}

View File

@@ -0,0 +1,148 @@
// AVPBooterPatcher.swift AVPBooter DGST bypass patcher.
//
// Python source: scripts/fw_patch.py patch_avpbooter()
//
// Strategy:
// 1. Disassemble the entire binary.
// 2. Find the first instruction that references the DGST marker constant
// (0x4447 appears as a 16-bit immediate in a MOVZ/MOVK encoding of 0x44475354).
// 3. Scan forward (up to 512 instructions) for the nearest RET/RETAA/RETAB.
// 4. Scan backward from RET (up to 32 instructions) for the last `mov x0, ...`
// or conditional-select instruction writing x0/w0.
// 5. Patch that instruction to `mov x0, #0`.
import Foundation
/// Patcher for AVPBooter DGST bypass.
public final class AVPBooterPatcher: Patcher {
    public let component = "avpbooter"
    public let verbose: Bool
    // Mutable working copy of the firmware; patches are written into it by apply().
    let buffer: BinaryBuffer
    // Private disassembler instance (not the shared singleton).
    let disasm = ARM64Disassembler()
    // Records found by the most recent findAll()/apply() call.
    var patches: [PatchRecord] = []

    // MARK: - Constants

    /// The hex string fragment Capstone emits when an instruction encodes 0x4447
    /// (lower half of "DGST" / 0x44475354 little-endian).
    private static let dgstSearch = "0x4447"
    /// Mnemonics that write to x0/w0 via conditional selection.
    private static let cselMnemonics: Set<String> = ["cset", "csinc", "csinv", "csneg"]
    /// Mnemonics that terminate a scan region (branch or return).
    private static let stopMnemonics: Set<String> = ["ret", "retaa", "retab", "b", "bl", "br", "blr"]

    public init(data: Data, verbose: Bool = true) {
        buffer = BinaryBuffer(data)
        self.verbose = verbose
    }

    // MARK: - Patcher

    /// Locate all patch sites without modifying the buffer.
    /// Resets any previously-found records before searching.
    public func findAll() throws -> [PatchRecord] {
        patches = []
        try patchDGSTBypass()
        return patches
    }

    /// Find and write every patch into the working buffer.
    /// - Returns: The number of patches applied.
    @discardableResult
    public func apply() throws -> Int {
        let _ = try findAll()
        for record in patches {
            buffer.writeBytes(at: record.fileOffset, bytes: record.patchedBytes)
        }
        if verbose, !patches.isEmpty {
            print("\n [\(patches.count) AVPBooter patch(es) applied]")
        }
        return patches.count
    }

    /// The (possibly patched) buffer contents.
    public var patchedData: Data {
        buffer.data
    }

    // MARK: - DGST Bypass

    /// Locate the DGST validation routine's epilogue and record a
    /// `mov x0, #0` patch over the instruction that sets its return value.
    ///
    /// Throws `PatcherError` when any stage of the search fails.
    private func patchDGSTBypass() throws {
        // Disassemble entire binary (raw ARM64, base address 0).
        let insns = disasm.disassemble(buffer.data, at: 0)
        guard !insns.isEmpty else {
            throw PatcherError.invalidFormat("AVPBooter: disassembly produced no instructions")
        }
        // Step 1 — locate the first instruction that references the DGST constant.
        // Matching is textual: the immediate appears in Capstone's operand string.
        guard let hitIdx = insns.firstIndex(where: { insn in
            "\(insn.mnemonic) \(insn.operandString)".contains(Self.dgstSearch)
        }) else {
            throw PatcherError.patchSiteNotFound("AVPBooter DGST: constant 0x4447 not found in binary")
        }
        // Step 2 — scan forward up to 512 instructions for a RET epilogue.
        // (ArraySlice shares the parent array's indices, so retIdx indexes `insns`.)
        let scanEnd = min(hitIdx + 512, insns.count)
        guard let retIdx = insns[hitIdx ..< scanEnd].firstIndex(where: { insn in
            insn.mnemonic == "ret" || insn.mnemonic == "retaa" || insn.mnemonic == "retab"
        }) else {
            throw PatcherError.patchSiteNotFound("AVPBooter DGST: epilogue RET not found within 512 instructions")
        }
        // Step 3 — scan backward from RET (up to 32 instructions) for x0/w0 setter.
        let backStart = max(retIdx - 32, 0)
        var x0Idx: Int? = nil
        // Iterate backward: from retIdx-1 down to backStart.
        var i = retIdx - 1
        while i >= backStart {
            let insn = insns[i]
            let mn = insn.mnemonic
            let op = insn.operandString
            // A plain MOV into x0/w0 is the preferred patch site.
            if mn == "mov", op.hasPrefix("x0,") || op.hasPrefix("w0,") {
                x0Idx = i
                break
            }
            // Conditional-select writers (cset/csinc/...) also count.
            if Self.cselMnemonics.contains(mn), op.hasPrefix("x0,") || op.hasPrefix("w0,") {
                x0Idx = i
                break
            }
            // Stop if we cross a function boundary or unconditional branch.
            if Self.stopMnemonics.contains(mn) {
                break
            }
            i -= 1
        }
        guard let targetIdx = x0Idx else {
            throw PatcherError.patchSiteNotFound("AVPBooter DGST: x0 setter not found before RET")
        }
        let target = insns[targetIdx]
        let fileOff = Int(target.address) // base address is 0, so VA == file offset
        let originalBytes = buffer.readBytes(at: fileOff, count: 4)
        let patchedBytes = ARM64.movX0_0
        let beforeStr = "\(target.mnemonic) \(target.operandString)"
        // Round-trip the replacement through the disassembler for the record.
        let afterInsn = disasm.disassembleOne(patchedBytes, at: UInt64(fileOff))
        let afterStr = afterInsn.map { "\($0.mnemonic) \($0.operandString)" } ?? "mov x0, #0"
        let record = PatchRecord(
            patchID: "avpbooter.dgst_bypass",
            component: component,
            fileOffset: fileOff,
            virtualAddress: nil,
            originalBytes: originalBytes,
            patchedBytes: patchedBytes,
            beforeDisasm: beforeStr,
            afterDisasm: afterStr,
            description: "DGST validation bypass: force x0=0 return value"
        )
        patches.append(record)
        if verbose {
            print(String(format: " 0x%06X: %@ → %@ [avpbooter.dgst_bypass]",
                         fileOff, beforeStr, afterStr))
        }
    }
}

View File

@@ -0,0 +1,124 @@
// BinaryBuffer.swift Mutable binary data buffer with read/write helpers.
import Foundation
/// A mutable binary buffer for reading and patching firmware data.
public final class BinaryBuffer: @unchecked Sendable {
/// The mutable working data.
public var data: Data
/// The original immutable snapshot (for before/after comparison).
public let original: Data
public var count: Int {
data.count
}
public init(_ data: Data) {
self.data = data
original = data
}
public convenience init(contentsOf url: URL) throws {
try self.init(Data(contentsOf: url))
}
// MARK: - Read Helpers
/// Read a little-endian UInt32 at the given byte offset.
@inlinable
public func readU32(at offset: Int) -> UInt32 {
data.withUnsafeBytes { buf in
buf.load(fromByteOffset: offset, as: UInt32.self)
}
}
/// Read a little-endian UInt64 at the given byte offset.
@inlinable
public func readU64(at offset: Int) -> UInt64 {
data.withUnsafeBytes { buf in
buf.load(fromByteOffset: offset, as: UInt64.self)
}
}
/// Read bytes at the given range.
public func readBytes(at offset: Int, count: Int) -> Data {
data[offset ..< offset + count]
}
// MARK: - Write Helpers
/// Write a little-endian UInt32 at the given byte offset.
@inlinable
public func writeU32(at offset: Int, value: UInt32) {
withUnsafeBytes(of: value.littleEndian) { src in
data.replaceSubrange(offset ..< offset + 4, with: src)
}
}
/// Write raw bytes at the given offset.
public func writeBytes(at offset: Int, bytes: Data) {
data.replaceSubrange(offset ..< offset + bytes.count, with: bytes)
}
// MARK: - Search Helpers
/// Find all occurrences of a byte pattern in the data.
public func findAll(_ pattern: Data, in range: Range<Int>? = nil) -> [Int] {
let searchRange = range ?? 0 ..< data.count
var results: [Int] = []
var offset = searchRange.lowerBound
while offset < searchRange.upperBound - pattern.count + 1 {
if let found = data.range(of: pattern, in: offset ..< searchRange.upperBound) {
results.append(found.lowerBound)
offset = found.lowerBound + 1
} else {
break
}
}
return results
}
/// Find a null-terminated C string at the given offset.
public func readCString(at offset: Int) -> String? {
data.withUnsafeBytes { buf in
guard offset < buf.count else { return nil }
let ptr = buf.baseAddress!.advanced(by: offset)
.assumingMemoryBound(to: CChar.self)
return String(cString: ptr)
}
}
/// Find the first occurrence of a C string in the data.
/// Matches Python `find_string()`: walks backward from the match to the
/// preceding NUL byte so that the returned offset is the C-string start.
///
/// Tries an exact C-string match (needle plus NUL terminator) first, then
/// falls back to a bare substring match. Both branches previously
/// duplicated the walk-back loop; they now share one code path.
///
/// - Parameters:
///   - string: Needle, encoded as UTF-8.
///   - from: Offset at which to start searching (must be within bounds).
/// - Returns: Offset of the start of the enclosing C string, or nil.
public func findString(_ string: String, from: Int = 0) -> Int? {
    guard let encoded = string.data(using: .utf8) else { return nil }
    var terminated = encoded
    terminated.append(0)
    // Preference order: exact NUL-terminated match, then substring match.
    for needle in [terminated, encoded] {
        guard let match = data.range(of: needle, in: from ..< data.count) else { continue }
        // Walk backward to the NUL (or buffer start) preceding the match,
        // so the offset returned is the start of the enclosing C string.
        var start = match.lowerBound
        while start > 0, data[start - 1] != 0 {
            start -= 1
        }
        return start
    }
    return nil
}
/// Find all occurrences of a C string's UTF-8 bytes in the data.
public func findAllStrings(_ string: String) -> [Int] {
    string.data(using: .utf8).map { findAll($0) } ?? []
}
}

View File

@@ -0,0 +1,52 @@
// IM4PHandler.swift Wrapper around Img4tool for IM4P firmware container handling.
import Foundation
import Img4tool
/// Handles loading, extracting, and re-packaging IM4P firmware containers.
public enum IM4PHandler {
    /// Load a firmware file as IM4P or raw data.
    ///
    /// - Parameter url: Path to the firmware file.
    /// - Returns: Tuple of (extracted payload data, original IM4P container,
    ///   nil when the file did not parse as IM4P).
    /// - Throws: File-read errors, and payload-extraction errors for files
    ///   that did parse as IM4P containers.
    public static func load(contentsOf url: URL) throws -> (payload: Data, im4p: IM4P?) {
        let raw = try Data(contentsOf: url)
        guard let container = try? IM4P(raw) else {
            // Not an IM4P container: treat the whole file as the payload.
            return (raw, nil)
        }
        return (try container.payload(), container)
    }

    /// Save patched data back to an IM4P container or as raw data.
    ///
    /// If the original was IM4P, re-packages with the same fourcc and LZFSE
    /// compression. Otherwise, writes raw bytes.
    ///
    /// - Parameters:
    ///   - patchedData: The patched payload bytes.
    ///   - originalIM4P: The original IM4P container (nil for raw files).
    ///   - url: Output file path.
    public static func save(
        patchedData: Data,
        originalIM4P: IM4P?,
        to url: URL
    ) throws {
        guard let original = originalIM4P else {
            try patchedData.write(to: url)
            return
        }
        // Re-package using the source container's fourcc and description.
        let repacked = try IM4P(
            fourcc: original.fourcc,
            description: original.description,
            payload: patchedData,
            compression: "lzfse"
        )
        try repacked.data.write(to: url)
    }
}

View File

@@ -0,0 +1,183 @@
// MachOHelpers.swift Mach-O parsing utilities for firmware patching.
import Foundation
import MachOKit
// MARK: - Segment/Section Info
/// Minimal segment info extracted from a Mach-O binary.
public struct MachOSegmentInfo: Sendable {
    /// Segment name (e.g. "__TEXT").
    public let name: String
    /// Virtual (load) address of the segment.
    public let vmAddr: UInt64
    /// Virtual size in bytes.
    public let vmSize: UInt64
    /// Offset of the segment's bytes within the file.
    public let fileOffset: UInt64
    /// Size of the segment's bytes in the file.
    public let fileSize: UInt64

    /// Explicit memberwise initializer. The compiler-synthesized memberwise
    /// init of a public struct is only `internal`, which would make this
    /// public type unconstructible from outside the module.
    public init(name: String, vmAddr: UInt64, vmSize: UInt64, fileOffset: UInt64, fileSize: UInt64) {
        self.name = name
        self.vmAddr = vmAddr
        self.vmSize = vmSize
        self.fileOffset = fileOffset
        self.fileSize = fileSize
    }
}
/// Minimal section info extracted from a Mach-O binary.
public struct MachOSectionInfo: Sendable {
    /// Owning segment name (e.g. "__TEXT").
    public let segmentName: String
    /// Section name (e.g. "__text").
    public let sectionName: String
    /// Virtual address of the section.
    public let address: UInt64
    /// Section size in bytes.
    public let size: UInt64
    /// Offset of the section's bytes within the file.
    public let fileOffset: UInt32

    /// Explicit memberwise initializer. The compiler-synthesized memberwise
    /// init of a public struct is only `internal`, which would make this
    /// public type unconstructible from outside the module.
    public init(segmentName: String, sectionName: String, address: UInt64, size: UInt64, fileOffset: UInt32) {
        self.segmentName = segmentName
        self.sectionName = sectionName
        self.address = address
        self.size = size
        self.fileOffset = fileOffset
    }
}
// MARK: - MachO Parser
/// Mach-O parsing utilities for kernel/firmware binary analysis.
///
/// All routines expect a zero-based `Data` (whole-file contents, not a
/// slice) holding a 64-bit little-endian image; input without MH_MAGIC_64
/// yields empty results.
public enum MachOParser {
    // MARK: - Constants

    private static let mhMagic64: UInt32 = 0xFEED_FACF // MH_MAGIC_64
    private static let lcSegment64: UInt32 = 0x19 // LC_SEGMENT_64
    private static let lcSymtab: UInt32 = 0x02 // LC_SYMTAB
    private static let headerSize = 32 // sizeof(mach_header_64)

    // MARK: - Raw Load Helpers

    /// Load a little-endian integer at an arbitrary byte offset.
    /// `loadUnaligned` is used because load-command offsets are not
    /// guaranteed to be aligned for `T`; `load(fromByteOffset:)` traps on
    /// misaligned access.
    private static func readLE<T: FixedWidthInteger>(_: T.Type, at offset: Int, in data: Data) -> T {
        data.withUnsafeBytes { buf in
            T(littleEndian: buf.loadUnaligned(fromByteOffset: offset, as: T.self))
        }
    }

    /// Decode a fixed 16-byte NUL-padded name field (segname/sectname).
    private static func fixedName16(in data: Data, at offset: Int) -> String {
        String(decoding: data[offset ..< offset + 16].prefix { $0 != 0 }, as: UTF8.self)
    }

    /// Walk the load-command table, invoking `body` with each command's type
    /// and file offset. Stops on truncation or `cmdsize < 8` — the previous
    /// code advanced by `cmdsize` unchecked, so a zero value on malformed
    /// input looped forever.
    private static func forEachLoadCommand(in data: Data, _ body: (_ cmd: UInt32, _ offset: Int) -> Void) {
        guard data.count > headerSize,
              readLE(UInt32.self, at: 0, in: data) == mhMagic64 else { return }
        let ncmds = readLE(UInt32.self, at: 16, in: data)
        var offset = headerSize
        for _ in 0 ..< ncmds {
            guard offset + 8 <= data.count else { break }
            let cmd = readLE(UInt32.self, at: offset, in: data)
            let cmdsize = Int(readLE(UInt32.self, at: offset + 4, in: data))
            guard cmdsize >= 8 else { break } // malformed command; bail out
            body(cmd, offset)
            offset += cmdsize
        }
    }

    // MARK: - Segments / Sections

    /// Parse all LC_SEGMENT_64 commands from a Mach-O binary in a Data buffer.
    public static func parseSegments(from data: Data) -> [MachOSegmentInfo] {
        var segments: [MachOSegmentInfo] = []
        forEachLoadCommand(in: data) { cmd, offset in
            // segment_command_64 is 72 bytes; skip truncated commands.
            guard cmd == lcSegment64, offset + 72 <= data.count else { return }
            segments.append(MachOSegmentInfo(
                name: fixedName16(in: data, at: offset + 8),
                vmAddr: readLE(UInt64.self, at: offset + 24, in: data),
                vmSize: readLE(UInt64.self, at: offset + 32, in: data),
                fileOffset: readLE(UInt64.self, at: offset + 40, in: data),
                fileSize: readLE(UInt64.self, at: offset + 48, in: data)
            ))
        }
        return segments
    }

    /// Parse all sections from a Mach-O binary.
    /// Returns a dictionary keyed by "segment,section".
    public static func parseSections(from data: Data) -> [String: MachOSectionInfo] {
        var sections: [String: MachOSectionInfo] = [:]
        forEachLoadCommand(in: data) { cmd, offset in
            guard cmd == lcSegment64, offset + 72 <= data.count else { return }
            let segName = fixedName16(in: data, at: offset + 8)
            let nsects = readLE(UInt32.self, at: offset + 64, in: data)
            var sectOff = offset + 72 // section_64 entries follow the command header
            for _ in 0 ..< nsects {
                guard sectOff + 80 <= data.count else { break } // sizeof(section_64)
                let sectName = fixedName16(in: data, at: sectOff)
                sections["\(segName),\(sectName)"] = MachOSectionInfo(
                    segmentName: segName,
                    sectionName: sectName,
                    address: readLE(UInt64.self, at: sectOff + 32, in: data),
                    size: readLE(UInt64.self, at: sectOff + 40, in: data),
                    fileOffset: readLE(UInt32.self, at: sectOff + 48, in: data)
                )
                sectOff += 80
            }
        }
        return sections
    }

    // MARK: - Address Translation

    /// Convert a virtual address to a file offset using segment mappings.
    /// The `va - vmAddr < vmSize` form avoids the `vmAddr + vmSize`
    /// overflow possible with hostile headers.
    public static func vaToFileOffset(_ va: UInt64, segments: [MachOSegmentInfo]) -> Int? {
        for seg in segments where va >= seg.vmAddr && va - seg.vmAddr < seg.vmSize {
            return Int(seg.fileOffset + (va - seg.vmAddr))
        }
        return nil
    }

    /// Convert a virtual address to a file offset by parsing segments from data.
    public static func vaToFileOffset(_ va: UInt64, in data: Data) -> Int? {
        vaToFileOffset(va, segments: parseSegments(from: data))
    }

    // MARK: - Symbols

    /// Parse the first LC_SYMTAB command.
    /// Returns (symoff, nsyms, stroff, strsize) or nil. Unlike the previous
    /// version, the Mach-O magic is validated here too, for consistency with
    /// the segment/section parsers.
    public static func parseSymtab(from data: Data) -> (symoff: Int, nsyms: Int, stroff: Int, strsize: Int)? {
        var result: (symoff: Int, nsyms: Int, stroff: Int, strsize: Int)?
        forEachLoadCommand(in: data) { cmd, offset in
            // symtab_command is 24 bytes; keep only the first one found.
            guard result == nil, cmd == lcSymtab, offset + 24 <= data.count else { return }
            result = (
                symoff: Int(readLE(UInt32.self, at: offset + 8, in: data)),
                nsyms: Int(readLE(UInt32.self, at: offset + 12, in: data)),
                stroff: Int(readLE(UInt32.self, at: offset + 16, in: data)),
                strsize: Int(readLE(UInt32.self, at: offset + 20, in: data))
            )
        }
        return result
    }

    /// Find a symbol containing the given name fragment. Returns its virtual
    /// address (n_value), or nil when no symbol matches.
    public static func findSymbol(containing fragment: String, in data: Data) -> UInt64? {
        guard let symtab = parseSymtab(from: data) else { return nil }
        // String-table reads are clamped to both the table and the buffer.
        let stringTableEnd = min(data.count, symtab.stroff + symtab.strsize)
        for i in 0 ..< symtab.nsyms {
            let entryOff = symtab.symoff + i * 16 // sizeof(nlist_64)
            guard entryOff + 16 <= data.count else { break }
            let nStrx = readLE(UInt32.self, at: entryOff, in: data)
            let nValue = readLE(UInt64.self, at: entryOff + 8, in: data)
            guard nStrx < symtab.strsize, nValue != 0 else { continue }
            let strStart = symtab.stroff + Int(nStrx)
            guard strStart < data.count else { continue }
            // Scan to the NUL terminator, bounded by the string table.
            var strEnd = strStart
            while strEnd < stringTableEnd, data[strEnd] != 0 {
                strEnd += 1
            }
            if let name = String(data: data[strStart ..< strEnd], encoding: .ascii),
               name.contains(fragment)
            {
                return nValue
            }
        }
        return nil
    }
}

View File

@@ -0,0 +1,72 @@
// PatchRecord.swift Per-patch verification record for migration validation.
import Foundation
/// A single patch application record, used to compare Python vs Swift output.
///
/// Pure value type: captures where a patch lands, the before/after bytes,
/// and optional disassembly text for logging. `Codable` so record sets can
/// be serialized and diffed across implementations.
public struct PatchRecord: Codable, Equatable, Sendable {
    /// Unique patch identifier (e.g., "kernel.bsd_init_rootvp").
    public let patchID: String
    /// Component being patched (e.g., "kernelcache", "ibss", "txm").
    public let component: String
    /// File offset where the patch is applied.
    public let fileOffset: Int
    /// Virtual address (if applicable, nil for raw binaries).
    public let virtualAddress: UInt64?
    /// Original bytes before patching.
    public let originalBytes: Data
    /// Replacement bytes after patching.
    public let patchedBytes: Data
    /// Capstone disassembly of original bytes.
    public let beforeDisasm: String
    /// Capstone disassembly of patched bytes.
    public let afterDisasm: String
    /// Human-readable description of what this patch does.
    public let patchDescription: String

    /// Creates a patch record.
    ///
    /// - Parameters:
    ///   - patchID: Unique patch identifier.
    ///   - component: Component being patched.
    ///   - fileOffset: File offset where the patch is applied.
    ///   - virtualAddress: Virtual address, when known.
    ///   - originalBytes: Bytes before patching.
    ///   - patchedBytes: Bytes after patching.
    ///   - beforeDisasm: Disassembly of the original bytes (may be empty).
    ///   - afterDisasm: Disassembly of the patched bytes (may be empty).
    ///   - description: Human-readable description. Stored as
    ///     `patchDescription` so it does not collide with
    ///     `CustomStringConvertible.description`.
    public init(
        patchID: String,
        component: String,
        fileOffset: Int,
        virtualAddress: UInt64? = nil,
        originalBytes: Data,
        patchedBytes: Data,
        beforeDisasm: String = "",
        afterDisasm: String = "",
        description: String
    ) {
        self.patchID = patchID
        self.component = component
        self.fileOffset = fileOffset
        self.virtualAddress = virtualAddress
        self.originalBytes = originalBytes
        self.patchedBytes = patchedBytes
        self.beforeDisasm = beforeDisasm
        self.afterDisasm = afterDisasm
        patchDescription = description
    }
}
// MARK: - CustomStringConvertible

extension PatchRecord: CustomStringConvertible {
    /// One-line summary: file offset, optional virtual address, the
    /// before → after rendering (disassembly when present, hex otherwise),
    /// and the patch ID.
    public var description: String {
        let addr = virtualAddress.map { String(format: " (VA 0x%llX)", $0) } ?? ""
        return String(format: " 0x%06X%@: %@ → %@ [%@]",
            fileOffset, addr,
            beforeDisasm.isEmpty ? originalBytes.hex : beforeDisasm,
            afterDisasm.isEmpty ? patchedBytes.hex : afterDisasm,
            patchID)
    }
}
extension Data {
    /// Lowercase hex encoding of the bytes: two digits per byte, no separators.
    var hex: String {
        reduce(into: "") { acc, byte in
            acc += String(format: "%02x", byte)
        }
    }
}

View File

@@ -0,0 +1,29 @@
// PatcherError.swift Error types for firmware patching.
import Foundation
/// Error cases raised by the firmware patchers.
public enum PatcherError: Error, CustomStringConvertible, Sendable {
    case fileNotFound(String)
    case invalidFormat(String)
    case patchSiteNotFound(String)
    case patchVerificationFailed(String)
    case encodingFailed(String)
    case multipleMatchesFound(String, count: Int)

    /// Human-readable message for logging and CLI output.
    public var description: String {
        switch self {
        case let .fileNotFound(path):
            return "File not found: \(path)"
        case let .invalidFormat(msg):
            return "Invalid format: \(msg)"
        case let .patchSiteNotFound(msg):
            return "Patch site not found: \(msg)"
        case let .patchVerificationFailed(msg):
            return "Patch verification failed: \(msg)"
        case let .encodingFailed(msg):
            return "Instruction encoding failed: \(msg)"
        case let .multipleMatchesFound(msg, count):
            return "Expected 1 match for \(msg), found \(count)"
        }
    }
}

View File

@@ -0,0 +1,28 @@
// PatcherProtocol.swift Common protocol for all firmware patchers.
import Foundation
/// A firmware patcher that can find and apply patches to a binary buffer.
///
/// Conforming types own their binary buffer and expose two entry points:
/// `findAll()` to report patch sites as records, and `apply()` to write
/// the patches into the buffer.
public protocol Patcher {
    /// The component name (e.g., "kernelcache", "ibss", "txm").
    var component: String { get }
    /// Whether to print verbose output.
    var verbose: Bool { get }
    /// Find all patch sites and return patch records (dry-run mode).
    func findAll() throws -> [PatchRecord]
    /// Apply all patches to the buffer. Returns the number of patches applied.
    @discardableResult
    func apply() throws -> Int
}
extension Patcher {
    /// Print a progress message, but only when verbose mode is enabled.
    func log(_ message: String) {
        guard verbose else { return }
        print(message)
    }
}

View File

@@ -0,0 +1,337 @@
// DeviceTreePatcher.swift DeviceTree payload patcher.
//
// Translated from Python source: scripts/dtree.py
//
// Strategy:
// 1. Parse the flat device tree binary into a node/property tree.
// 2. Apply a fixed set of property patches (serial-number, home-button-type,
// artwork-device-subtype, island-notch-location).
// 3. Serialize the modified tree back to flat binary.
import Foundation
/// Patcher for DeviceTree payloads.
///
/// Two-phase operation: `findAll()` parses the flat device-tree binary and
/// computes one `PatchRecord` per entry of `propertyPatches`; `apply()`
/// then writes each record's bytes back at the property's original file
/// offset. The tree is never re-serialized here — every patch overwrites
/// an existing property value in place.
public final class DeviceTreePatcher: Patcher {
    public let component = "devicetree"
    public let verbose: Bool
    /// Working buffer holding the payload; mutated only by `apply()`.
    let buffer: BinaryBuffer
    /// Records produced by the most recent `findAll()` / `apply()` run.
    var patches: [PatchRecord] = []
    // MARK: - Patch Definitions
    /// A single property patch specification.
    struct PropertyPatch {
        /// Node path from the root, e.g. ["device-tree", "buttons"].
        let nodePath: [String]
        /// Name of the property to overwrite inside the resolved node.
        let property: String
        /// Number of value bytes to write.
        let length: Int
        /// Property flags. Kept for parity with the Python patch table;
        /// not consulted by `applyPatches` (only value bytes are written).
        let flags: UInt16
        /// The replacement value.
        let value: PropertyValue
        /// Stable identifier recorded in the PatchRecord.
        let patchID: String
        /// Human-readable description.
        let description: String
    }
    /// The value to write into a device tree property.
    enum PropertyValue {
        case string(String)
        case integer(UInt64)
    }
    /// Fixed set of device tree patches, matching scripts/dtree.py PATCHES.
    static let propertyPatches: [PropertyPatch] = [
        PropertyPatch(
            nodePath: ["device-tree"],
            property: "serial-number",
            length: 12,
            flags: 0,
            value: .string("vphone-1337"),
            patchID: "devicetree.serial_number",
            description: "Set serial number to vphone-1337"
        ),
        PropertyPatch(
            nodePath: ["device-tree", "buttons"],
            property: "home-button-type",
            length: 4,
            flags: 0,
            value: .integer(2),
            patchID: "devicetree.home_button_type",
            description: "Set home button type to 2"
        ),
        PropertyPatch(
            nodePath: ["device-tree", "product"],
            property: "artwork-device-subtype",
            length: 4,
            flags: 0,
            value: .integer(2556),
            patchID: "devicetree.artwork_device_subtype",
            description: "Set artwork device subtype to 2556"
        ),
        PropertyPatch(
            nodePath: ["device-tree", "product"],
            property: "island-notch-location",
            length: 4,
            flags: 0,
            value: .integer(144),
            patchID: "devicetree.island_notch_location",
            description: "Set island notch location to 144"
        ),
    ]
    // MARK: - Device Tree Structures
    /// A single property in a device tree node.
    final class DTProperty {
        var name: String
        var length: Int
        var flags: UInt16
        var value: Data
        /// File offset of the property value within the flat binary.
        let valueOffset: Int
        init(name: String, length: Int, flags: UInt16, value: Data, valueOffset: Int) {
            self.name = name
            self.length = length
            self.flags = flags
            self.value = value
            self.valueOffset = valueOffset
        }
    }
    /// A node in the device tree containing properties and child nodes.
    final class DTNode {
        var properties: [DTProperty] = []
        var children: [DTNode] = []
    }
    // MARK: - Init
    /// - Parameters:
    ///   - data: Flat device-tree payload bytes.
    ///   - verbose: When true, each patch is printed as it is recorded.
    public init(data: Data, verbose: Bool = true) {
        buffer = BinaryBuffer(data)
        self.verbose = verbose
    }
    // MARK: - Patcher
    /// Parse the payload and compute all patch records without writing them.
    public func findAll() throws -> [PatchRecord] {
        patches = []
        let root = try parsePayload(buffer.data)
        try applyPatches(root: root)
        return patches
    }
    /// Compute the patch records, then write each one into the buffer at
    /// its recorded file offset.
    @discardableResult
    public func apply() throws -> Int {
        let _ = try findAll()
        for record in patches {
            buffer.writeBytes(at: record.fileOffset, bytes: record.patchedBytes)
        }
        if verbose, !patches.isEmpty {
            print("\n [\(patches.count) DeviceTree patch(es) applied]")
        }
        return patches.count
    }
    /// The (possibly patched) payload bytes.
    public var patchedData: Data {
        buffer.data
    }
    // MARK: - Parsing
    /// Align a value up to the next 4-byte boundary.
    private static func align4(_ n: Int) -> Int {
        (n + 3) & ~3
    }
    /// Decode a null-terminated C string from raw bytes.
    /// Falls back to decoding the whole slice when no NUL byte is present.
    private static func decodeCString(_ data: Data) -> String {
        if let nullIndex = data.firstIndex(of: 0) {
            let slice = data[data.startIndex ..< nullIndex]
            return String(bytes: slice, encoding: .utf8) ?? ""
        }
        return String(bytes: data, encoding: .utf8) ?? ""
    }
    /// Parse a device tree node from the flat binary at the given offset.
    /// Returns the parsed node and the offset past the end of the node.
    ///
    /// Flat layout per node: UInt32 property count, UInt32 child count,
    /// then each property as a 32-byte NUL-padded name, UInt16 length,
    /// UInt16 flags, and the value padded to a 4-byte boundary; child
    /// nodes follow depth-first.
    private func parseNode(_ blob: Data, offset: Int) throws -> (DTNode, Int) {
        guard offset + 8 <= blob.count else {
            throw PatcherError.invalidFormat("DeviceTree: truncated node header at offset \(offset)")
        }
        let nProps = blob.loadLE(UInt32.self, at: offset)
        let nChildren = blob.loadLE(UInt32.self, at: offset + 4)
        var pos = offset + 8
        let node = DTNode()
        for _ in 0 ..< nProps {
            guard pos + 36 <= blob.count else {
                throw PatcherError.invalidFormat("DeviceTree: truncated property header at offset \(pos)")
            }
            // Index via startIndex so the arithmetic stays correct even if
            // a Data slice (non-zero startIndex) is ever passed in.
            let nameData = blob[blob.startIndex.advanced(by: pos) ..< blob.startIndex.advanced(by: pos + 32)]
            let name = Self.decodeCString(Data(nameData))
            let length = Int(blob.loadLE(UInt16.self, at: pos + 32))
            let flags = blob.loadLE(UInt16.self, at: pos + 34)
            pos += 36
            guard pos + length <= blob.count else {
                throw PatcherError.invalidFormat("DeviceTree: truncated property value '\(name)' at offset \(pos)")
            }
            let value = Data(blob[blob.startIndex.advanced(by: pos) ..< blob.startIndex.advanced(by: pos + length)])
            let valueOffset = pos
            // Values are padded to a 4-byte boundary in the flat format.
            pos += Self.align4(length)
            node.properties.append(DTProperty(
                name: name, length: length, flags: flags,
                value: value, valueOffset: valueOffset
            ))
        }
        // Children are serialized immediately after this node's properties.
        for _ in 0 ..< nChildren {
            let (child, nextPos) = try parseNode(blob, offset: pos)
            node.children.append(child)
            pos = nextPos
        }
        return (node, pos)
    }
    /// Parse the entire device tree payload; rejects trailing garbage.
    private func parsePayload(_ blob: Data) throws -> DTNode {
        let (root, end) = try parseNode(blob, offset: 0)
        guard end == blob.count else {
            throw PatcherError.invalidFormat(
                "DeviceTree: unexpected trailing bytes (\(blob.count - end) extra)"
            )
        }
        return root
    }
    // MARK: - Node Navigation
    /// Get the "name" property value from a node (empty string when absent).
    private func nodeName(_ node: DTNode) -> String {
        for prop in node.properties {
            if prop.name == "name" {
                return Self.decodeCString(prop.value)
            }
        }
        return ""
    }
    /// Find a direct child node by name.
    private func findChild(_ node: DTNode, name: String) throws -> DTNode {
        for child in node.children {
            if nodeName(child) == name {
                return child
            }
        }
        throw PatcherError.patchSiteNotFound("DeviceTree: missing child node '\(name)'")
    }
    /// Resolve a node path like ["device-tree", "buttons"] from the root.
    /// The first path element must be the literal root name "device-tree".
    private func resolveNode(_ root: DTNode, path: [String]) throws -> DTNode {
        guard !path.isEmpty, path[0] == "device-tree" else {
            throw PatcherError.patchSiteNotFound("DeviceTree: invalid node path \(path)")
        }
        var node = root
        for name in path.dropFirst() {
            node = try findChild(node, name: name)
        }
        return node
    }
    /// Find a property by name within a node.
    private func findProperty(_ node: DTNode, name: String) throws -> DTProperty {
        for prop in node.properties {
            if prop.name == name {
                return prop
            }
        }
        throw PatcherError.patchSiteNotFound("DeviceTree: missing property '\(name)'")
    }
    // MARK: - Value Encoding
    /// Encode a string value with null termination, padded/truncated to a fixed length.
    /// NOTE(review): when the text plus its NUL exceeds `length`, the result
    /// is truncated and may lose the terminator — presumed to match
    /// scripts/dtree.py; confirm against the Python source.
    private static func encodeFixedString(_ text: String, length: Int) -> Data {
        var raw = Data(text.utf8)
        raw.append(0) // null terminator
        if raw.count > length {
            return Data(raw.prefix(length))
        }
        raw.append(contentsOf: [UInt8](repeating: 0, count: length - raw.count))
        return raw
    }
    /// Encode an integer value as little-endian bytes.
    /// - Throws: `PatcherError.invalidFormat` for widths other than 1/2/4/8.
    private static func encodeInteger(_ value: UInt64, length: Int) throws -> Data {
        var data = Data(count: length)
        switch length {
        case 1:
            data[0] = UInt8(value & 0xFF)
        case 2:
            let v = UInt16(value & 0xFFFF)
            data.withUnsafeMutableBytes { $0.storeBytes(of: v.littleEndian, as: UInt16.self) }
        case 4:
            let v = UInt32(value & 0xFFFF_FFFF)
            data.withUnsafeMutableBytes { $0.storeBytes(of: v.littleEndian, as: UInt32.self) }
        case 8:
            data.withUnsafeMutableBytes { $0.storeBytes(of: value.littleEndian, as: UInt64.self) }
        default:
            throw PatcherError.invalidFormat("DeviceTree: unsupported integer length \(length)")
        }
        return data
    }
    // MARK: - Patch Application
    /// Apply all property patches and record each change.
    ///
    /// Only appends `PatchRecord`s — no bytes are written here; `apply()`
    /// performs the writes. `patch.flags` is not written (the on-disk
    /// property header stays untouched; only value bytes change).
    /// NOTE(review): assumes each target property's stored length is at
    /// least `patch.length`; verify against scripts/dtree.py.
    private func applyPatches(root: DTNode) throws {
        for patch in Self.propertyPatches {
            let node = try resolveNode(root, path: patch.nodePath)
            let prop = try findProperty(node, name: patch.property)
            let originalBytes = Data(prop.value.prefix(patch.length))
            let newValue: Data = switch patch.value {
            case let .string(s):
                Self.encodeFixedString(s, length: patch.length)
            case let .integer(v):
                try Self.encodeInteger(v, length: patch.length)
            }
            let record = PatchRecord(
                patchID: patch.patchID,
                component: component,
                fileOffset: prop.valueOffset,
                virtualAddress: nil,
                originalBytes: originalBytes,
                patchedBytes: newValue,
                description: patch.description
            )
            patches.append(record)
            if verbose {
                print(String(format: " 0x%06X: %@ → %@ [%@]",
                    prop.valueOffset,
                    originalBytes.hex,
                    newValue.hex,
                    patch.patchID))
            }
        }
    }
}
// MARK: - Data Helpers
private extension Data {
    /// Load a little-endian integer at the given byte offset.
    ///
    /// Uses `loadUnaligned(fromByteOffset:as:)` so reads at offsets not
    /// aligned for `T` are well-defined; the previous
    /// `load(fromByteOffset:as:)` call traps on misaligned addresses, and
    /// offsets inside a parsed flat binary are not guaranteed aligned.
    func loadLE<T: FixedWidthInteger>(_: T.Type, at offset: Int) -> T {
        withUnsafeBytes { buf in
            T(littleEndian: buf.loadUnaligned(fromByteOffset: offset, as: T.self))
        }
    }
}

View File

@@ -0,0 +1,187 @@
// IBootJBPatcher.swift JB-variant iBoot patcher (nonce bypass).
//
// Python source: scripts/patchers/iboot_jb.py
import Capstone
import Foundation
/// JB-variant patcher for iBoot images.
///
/// Adds iBSS-only patches:
/// 1. `patchSkipGenerateNonce` — locate ADRP+ADD references to the
///    "boot-nonce" string, find the `tbz/tbnz w0, #0, <target>` /
///    `mov w0, #0` / `bl` sequence nearby, and rewrite the conditional
///    branch into an unconditional `b <target>`.
public final class IBootJBPatcher: IBootPatcher {
    /// JB schedule: only the iBSS image gets extra patches; other modes
    /// return an empty record list.
    override public func findAll() throws -> [PatchRecord] {
        patches = []
        if mode == .ibss {
            patchSkipGenerateNonce()
        }
        return patches
    }
    // MARK: - JB Patches
    /// Locate the generate-nonce gate and force it to branch unconditionally.
    ///
    /// Steps:
    /// 1. Find every occurrence of the "boot-nonce" string bytes.
    /// 2. Collect ADRP+ADD pairs that materialize any of those offsets.
    /// 3. From each ADD, scan forward (up to 0x100 bytes) for the
    ///    tbz/tbnz + mov + bl window and patch the first match found.
    /// - Returns: true when a patch record was emitted (only the first
    ///   matching site is patched).
    @discardableResult
    func patchSkipGenerateNonce() -> Bool {
        let needle = Data("boot-nonce".utf8)
        let stringOffsets = buffer.findAll(needle)
        if stringOffsets.isEmpty {
            if verbose { print(" [-] iBSS JB: no refs to 'boot-nonce'") }
            return false
        }
        // Collect all ADRP+ADD sites that reference any "boot-nonce" occurrence.
        var addOffsets: [Int] = []
        for strOff in stringOffsets {
            let refs = findRefsToOffset(strOff)
            for (_, addOff) in refs {
                addOffsets.append(addOff)
            }
        }
        if addOffsets.isEmpty {
            if verbose { print(" [-] iBSS JB: no ADRP+ADD refs to 'boot-nonce'") }
            return false
        }
        // For each ADD ref, scan forward up to 0x100 bytes for the pattern:
        //   tbz/tbnz w0, #0, <target>
        //   mov w0, #0
        //   bl <anything>
        for addOff in addOffsets {
            // `buffer.count - 12` keeps the 3-instruction window in bounds.
            let scanLimit = min(addOff + 0x100, buffer.count - 12)
            var scan = addOff
            while scan <= scanLimit {
                guard
                    let i0 = disasm.disassembleOne(in: buffer.data, at: scan),
                    let i1 = disasm.disassembleOne(in: buffer.data, at: scan + 4),
                    let i2 = disasm.disassembleOne(in: buffer.data, at: scan + 8)
                else {
                    scan += 4
                    continue
                }
                // i0 must be tbz or tbnz
                guard i0.mnemonic == "tbz" || i0.mnemonic == "tbnz" else {
                    scan += 4
                    continue
                }
                // i0 operands: [0]=reg (w0), [1]=bit (0), [2]=target address
                guard
                    let detail0 = i0.aarch64,
                    detail0.operands.count >= 3,
                    detail0.operands[0].type == AARCH64_OP_REG,
                    detail0.operands[0].reg.rawValue == AARCH64_REG_W0.rawValue,
                    detail0.operands[1].type == AARCH64_OP_IMM,
                    detail0.operands[1].imm == 0
                else {
                    scan += 4
                    continue
                }
                // i1 must be: mov w0, #0
                guard i1.mnemonic == "mov", i1.operandString == "w0, #0" else {
                    scan += 4
                    continue
                }
                // i2 must be bl
                guard i2.mnemonic == "bl" else {
                    scan += 4
                    continue
                }
                // Branch target from tbz operand[2]
                let target = Int(detail0.operands[2].imm)
                // Encode `b <target>`; nil when the displacement is out of
                // B-encoding range, in which case scanning continues.
                guard let patchBytes = ARM64Encoder.encodeB(from: scan, to: target) else {
                    if verbose {
                        print(String(format: " [-] iBSS JB: encodeB out of range at 0x%X → 0x%X", scan, target))
                    }
                    scan += 4
                    continue
                }
                let originalBytes = buffer.readBytes(at: scan, count: 4)
                let beforeStr = "\(i0.mnemonic) \(i0.operandString)"
                // Disassemble the replacement for the record's after-text.
                let afterInsn = disasm.disassembleOne(patchBytes, at: UInt64(scan))
                let afterStr = afterInsn.map { "\($0.mnemonic) \($0.operandString)" } ?? "b"
                let record = PatchRecord(
                    patchID: "ibss_jb.skip_generate_nonce",
                    component: component,
                    fileOffset: scan,
                    virtualAddress: nil,
                    originalBytes: originalBytes,
                    patchedBytes: patchBytes,
                    beforeDisasm: beforeStr,
                    afterDisasm: afterStr,
                    description: "JB: skip generate_nonce"
                )
                patches.append(record)
                if verbose {
                    print(String(format: " 0x%06X: %@ → %@ [ibss_jb.skip_generate_nonce]",
                        scan, beforeStr, afterStr))
                }
                return true
            }
        }
        if verbose { print(" [-] iBSS JB: generate_nonce branch pattern not found") }
        return false
    }
    // MARK: - Reference Search Helpers
    /// Find all ADRP+ADD pairs in the binary that point to `targetOff`.
    ///
    /// Scans the entire buffer in 4-byte steps, checking consecutive instruction
    /// pairs for the ADRP+ADD pattern. Matches when
    /// `adrp_page_addr + add_imm12 == targetOff` (raw binary, base address = 0).
    private func findRefsToOffset(_ targetOff: Int) -> [(adrpOff: Int, addOff: Int)] {
        let data = buffer.data
        let size = buffer.count
        var refs: [(Int, Int)] = []
        var off = 0
        while off + 8 <= size {
            guard
                let a = disasm.disassembleOne(in: data, at: off),
                let b = disasm.disassembleOne(in: data, at: off + 4)
            else {
                off += 4
                continue
            }
            guard
                a.mnemonic == "adrp",
                b.mnemonic == "add",
                let detA = a.aarch64,
                let detB = b.aarch64,
                detA.operands.count >= 2,
                detB.operands.count >= 3,
                // Destination register of ADRP must match source register of ADD
                detA.operands[0].reg.rawValue == detB.operands[1].reg.rawValue,
                detA.operands[1].type == AARCH64_OP_IMM,
                detB.operands[2].type == AARCH64_OP_IMM
            else {
                off += 4
                continue
            }
            let pageAddr = detA.operands[1].imm // ADRP result (page-aligned VA)
            let addImm = detB.operands[2].imm // ADD immediate (page offset)
            if pageAddr + addImm == Int64(targetOff) {
                refs.append((off, off + 4))
            }
            off += 4
        }
        return refs
    }
}

View File

@@ -0,0 +1,585 @@
// IBootPatcher.swift iBoot chain patcher (iBSS, iBEC, LLB).
//
// Translated from Python: scripts/patchers/iboot.py
// Each patch mirrors Python logic exactly — no hardcoded offsets.
//
// Patch schedule by mode:
//   ibss — serial labels + image4 callback
//   ibec — ibss + boot-args
//   llb  — ibec + rootfs bypass (5 patches) + panic bypass
import Capstone
import Foundation
/// Patcher for iBoot components (iBSS, iBEC, LLB).
public class IBootPatcher: Patcher {
// MARK: - Types
public enum Mode: String, Sendable {
case ibss
case ibec
case llb
}
// MARK: - Constants
/// Default custom boot-args string (Python: IBootPatcher.BOOT_ARGS)
static let bootArgs = "serial=3 -v debug=0x2014e %s"
/// Chunked disassembly parameters (Python: CHUNK_SIZE, OVERLAP)
private static let chunkSize = 0x2000
private static let chunkOverlap = 0x100
// MARK: - Properties
public let component: String
public let verbose: Bool
let buffer: BinaryBuffer
let mode: Mode
let disasm = ARM64Disassembler()
var patches: [PatchRecord] = []
// MARK: - Init
public init(data: Data, mode: Mode, verbose: Bool = true) {
buffer = BinaryBuffer(data)
self.mode = mode
component = mode.rawValue
self.verbose = verbose
}
// MARK: - Patcher Protocol
public func findAll() throws -> [PatchRecord] {
patches = []
patchSerialLabels()
patchImage4Callback()
if mode == .ibec || mode == .llb {
patchBootArgs()
}
if mode == .llb {
patchRootfssBypass()
patchPanicBypass()
}
return patches
}
@discardableResult
public func apply() throws -> Int {
let _ = try findAll()
for record in patches {
buffer.writeBytes(at: record.fileOffset, bytes: record.patchedBytes)
}
if verbose, !patches.isEmpty {
print("\n [\(patches.count) \(mode.rawValue) patches applied]")
}
return patches.count
}
/// Get the patched data.
public var patchedData: Data {
buffer.data
}
// MARK: - Emit Helpers
/// Record a code patch (disassembles before/after for logging).
func emit(_ offset: Int, _ patchBytes: Data, id: String, description: String) {
let originalBytes = buffer.readBytes(at: offset, count: patchBytes.count)
let beforeInsn = disasm.disassembleOne(in: buffer.original, at: offset)
let afterInsn = disasm.disassembleOne(patchBytes, at: UInt64(offset))
let beforeStr = beforeInsn.map { "\($0.mnemonic) \($0.operandString)" } ?? "???"
let afterStr = afterInsn.map { "\($0.mnemonic) \($0.operandString)" } ?? "???"
let record = PatchRecord(
patchID: id,
component: component,
fileOffset: offset,
originalBytes: originalBytes,
patchedBytes: patchBytes,
beforeDisasm: beforeStr,
afterDisasm: afterStr,
description: description
)
patches.append(record)
if verbose {
print(String(format: " 0x%06X: %@ → %@ [%@]", offset, beforeStr, afterStr, description))
}
}
/// Record a string/data patch (not disassemblable).
func emitString(_ offset: Int, _ data: Data, id: String, description: String) {
let originalBytes = buffer.readBytes(at: offset, count: data.count)
let txt = String(data: data, encoding: .ascii) ?? data.hex
let record = PatchRecord(
patchID: id,
component: component,
fileOffset: offset,
originalBytes: originalBytes,
patchedBytes: data,
beforeDisasm: "",
afterDisasm: repr(txt),
description: description
)
patches.append(record)
if verbose {
print(String(format: " 0x%06X: → %@ [%@]", offset, repr(txt), description))
}
}
private func repr(_ s: String) -> String {
"\"\(s)\""
}
// MARK: - Pattern Search Helpers
/// Encode `mov w8, #<imm16>` (MOVZ W8, #imm) as 4 little-endian bytes.
/// MOVZ W encoding: [31]=0 sf, [30:29]=10, [28:23]=100101, [22:21]=hw=00,
/// [20:5]=imm16, [4:0]=Rd=8
func encodedMovW8(_ imm16: UInt32) -> Data {
let insn: UInt32 = 0x5280_0000 | ((imm16 & 0xFFFF) << 5) | 8
return withUnsafeBytes(of: insn.littleEndian) { Data($0) }
}
/// Encode `movk w8, #<imm16>, lsl #16` (MOVK W8, #imm, LSL #16).
/// MOVK W: [31]=0, [30:29]=11, [28:23]=100101, [22:21]=hw=01,
/// [20:5]=imm16, [4:0]=Rd=8
func encodedMovkW8Lsl16(_ imm16: UInt32) -> Data {
let insn: UInt32 = 0x72A0_0000 | ((imm16 & 0xFFFF) << 5) | 8
return withUnsafeBytes(of: insn.littleEndian) { Data($0) }
}
/// Find all file offsets where the given 4-byte pattern appears.
/// Equivalent to Python `_find_asm_pattern(data, asm_str)`.
func findPattern(_ pattern: Data) -> [Int] {
buffer.findAll(pattern)
}
// MARK: - Chunked Disassembly
/// Yield chunks of disassembled instructions over the whole binary.
/// Mirrors Python `_chunked_disasm()` with CHUNK_SIZE=0x2000, OVERLAP=0x100.
func chunkedDisasm() -> [[Instruction]] {
let size = buffer.original.count
var results: [[Instruction]] = []
var off = 0
while off < size {
let end = min(off + IBootPatcher.chunkSize, size)
let chunkLen = end - off
let slice = buffer.original[off ..< off + chunkLen]
let insns = disasm.disassemble(Data(slice), at: UInt64(off))
results.append(insns)
off += IBootPatcher.chunkSize - IBootPatcher.chunkOverlap
}
return results
}
// MARK: - 1. Serial Labels
/// Find the two long '====...' banner runs and write the mode label into each.
/// Python: `patch_serial_labels()`
func patchSerialLabels() {
let labelStr = "Loaded \(mode.rawValue.uppercased())"
guard let labelBytes = labelStr.data(using: .ascii) else { return }
// Collect all runs of '=' (>=20 chars) same logic as Python.
let raw = buffer.original
var eqRuns: [Int] = []
var i = raw.startIndex
while i < raw.endIndex {
if raw[i] == UInt8(ascii: "=") {
let start = i
while i < raw.endIndex, raw[i] == UInt8(ascii: "=") {
i = raw.index(after: i)
}
let runLen = raw.distance(from: start, to: i)
if runLen >= 20 {
eqRuns.append(raw.distance(from: raw.startIndex, to: start))
}
} else {
i = raw.index(after: i)
}
}
if eqRuns.count < 2 {
if verbose { print(" [-] serial labels: <2 banner runs found") }
return
}
for runStart in eqRuns.prefix(2) {
let writeOff = runStart + 1 // Python: run_start + 1
emitString(writeOff, labelBytes, id: "\(component).serial_label", description: "serial label")
}
}
// MARK: - 2. image4_validate_property_callback
/// Find the b.ne + mov x0, x22 pattern with a preceding cmp.
/// Patch: b.ne → NOP, mov x0, x22 → mov x0, #0.
/// Python: `patch_image4_callback()`
func patchImage4Callback() {
    /// True when `insn` writes -1 into w22 (movn w22, or mov w22 with #-1 / #0xffffffff).
    func writesMinusOneToW22(_ insn: Instruction) -> Bool {
        if insn.mnemonic == "movn", insn.operandString.hasPrefix("w22,") {
            return true
        }
        guard insn.mnemonic == "mov", insn.operandString.contains("w22") else { return false }
        return insn.operandString.contains("#-1") || insn.operandString.contains("#0xffffffff")
    }

    var found: [(addr: Int, hasNeg1: Bool)] = []
    for chunk in chunkedDisasm() where chunk.count >= 2 {
        for idx in 0 ..< chunk.count - 1 {
            let first = chunk[idx]
            let second = chunk[idx + 1]
            // Must be: b.ne followed immediately by mov x0, x22
            if first.mnemonic != "b.ne" { continue }
            if second.mnemonic != "mov" || second.operandString != "x0, x22" { continue }
            // There must be a cmp within the 8 preceding instructions
            let cmpWindow = chunk[max(0, idx - 8) ..< idx]
            if !cmpWindow.contains(where: { $0.mnemonic == "cmp" }) { continue }
            // Prefer candidates preceded (within 64 insns) by code writing -1 into w22.
            let neg1Window = chunk[max(0, idx - 64) ..< idx]
            found.append((addr: Int(first.address), hasNeg1: neg1Window.contains(where: writesMinusOneToW22)))
        }
    }
    guard !found.isEmpty else {
        if verbose { print(" [-] image4 callback: pattern not found") }
        return
    }
    // Prefer the candidate that has a movn w22 (error return path); otherwise take the last.
    let off = found.first(where: { $0.hasNeg1 })?.addr ?? found.last!.addr
    emit(off, ARM64.nop, id: "\(component).image4_callback_bne", description: "image4 callback: b.ne → nop")
    emit(off + 4, ARM64.movX0_0, id: "\(component).image4_callback_mov", description: "image4 callback: mov x0,x22 → mov x0,#0")
}
// MARK: - 3. Boot-Args (iBEC / LLB)
/// Redirect ADRP+ADD x2 to a custom boot-args string.
/// Python: `patch_boot_args()`
func patchBootArgs(newArgs: String = IBootPatcher.bootArgs) {
    guard let argsBytes = newArgs.data(using: .ascii) else { return }
    guard let fmtOff = findBootArgsFmt() else {
        if verbose { print(" [-] boot-args: format string not found") }
        return
    }
    guard let pair = findBootArgsAdrp(fmtOff: fmtOff) else {
        if verbose { print(" [-] boot-args: ADRP+ADD x2 not found") }
        return
    }
    let (adrpOff, addOff) = pair
    guard let slotOff = findStringSlot(length: argsBytes.count) else {
        if verbose { print(" [-] boot-args: no NUL slot") }
        return
    }
    // Write the replacement boot-args string into the NUL slot.
    emitString(slotOff, argsBytes, id: "\(component).boot_args_string", description: "boot-args string")
    // Re-point ADRP x2 at the page containing the new string.
    guard let adrpWord = ARM64Encoder.encodeADRP(rd: 2, pc: UInt64(adrpOff), target: UInt64(slotOff)) else {
        if verbose { print(" [-] boot-args: ADRP encoding out of range") }
        return
    }
    emit(adrpOff, adrpWord, id: "\(component).boot_args_adrp", description: "boot-args: adrp x2 → new string page")
    // Re-encode ADD x2, x2, #offset with the string's in-page offset.
    guard let addWord = ARM64Encoder.encodeAddImm12(rd: 2, rn: 2, imm12: UInt32(slotOff & 0xFFF)) else {
        if verbose { print(" [-] boot-args: ADD encoding out of range") }
        return
    }
    emit(addOff, addWord, id: "\(component).boot_args_add", description: "boot-args: add x2 → new string offset")
}
/// Find the standalone "%s" format string near "rd=md0" or "BootArgs".
/// Python: `_find_boot_args_fmt()`
///
/// - Returns: The file offset of an isolated `"%s"` (NUL byte on both sides)
///   within 0x40 bytes after the anchor string, or `nil` if not found.
private func findBootArgsFmt() -> Int? {
    let raw = buffer.original
    // Find the anchor string: prefer "rd=md0", fall back to "BootArgs".
    var anchor: Int? = raw.range(of: Data("rd=md0".utf8)).map { raw.distance(from: raw.startIndex, to: $0.lowerBound) }
    if anchor == nil {
        anchor = raw.range(of: Data("BootArgs".utf8)).map { raw.distance(from: raw.startIndex, to: $0.lowerBound) }
    }
    guard let anchorOff = anchor else { return nil }
    // Search for "%s" within 0x40 bytes of the anchor, clamped to the buffer end.
    let searchEnd = min(anchorOff + 0x40, raw.count)
    let pctS = Data([UInt8(ascii: "%"), UInt8(ascii: "s")])
    var off = anchorOff
    while off < searchEnd {
        guard let range = raw.range(of: pctS, in: off ..< searchEnd) else { return nil }
        let found = raw.distance(from: raw.startIndex, to: range.lowerBound)
        // FIX: removed the previous `found >= off + raw.count` guard — it was
        // vacuous (`found` is always < raw.count, so the condition could never
        // hold) and only obscured the loop.
        // Must have NUL before and NUL after (isolated "%s\0")
        if found > 0, raw[found - 1] == 0, found + 2 < raw.count, raw[found + 2] == 0 {
            return found
        }
        off = found + 1
    }
    return nil
}
/// Find the ADRP+ADD x2 pair that materialises the address of the format
/// string at `fmtOff`.
/// Python: `_find_boot_args_adrp()`
private func findBootArgsAdrp(fmtOff: Int) -> (Int, Int)? {
    for chunk in chunkedDisasm() where chunk.count >= 2 {
        for idx in 0 ..< chunk.count - 1 {
            let adrp = chunk[idx]
            let add = chunk[idx + 1]
            // Only an ADRP immediately followed by an ADD is interesting.
            guard adrp.mnemonic == "adrp", add.mnemonic == "add" else { continue }
            // First operand of the ADRP must be x2.
            guard adrp.operandString.hasPrefix("x2,") else { continue }
            guard let adrpDetail = adrp.aarch64,
                  let addDetail = add.aarch64,
                  adrpDetail.operands.count >= 2,
                  addDetail.operands.count >= 3,
                  // ADRP Rd must equal ADD Rn (same register)
                  adrpDetail.operands[0].reg == addDetail.operands[1].reg
            else { continue }
            // ADRP page imm (already a page-aligned VA) + ADD imm12 must hit fmtOff.
            if Int(adrpDetail.operands[1].imm + addDetail.operands[2].imm) == fmtOff {
                return (Int(adrp.address), Int(add.address))
            }
        }
    }
    return nil
}
/// Find a run of NUL bytes at least 64 bytes long to write the new string into.
/// Python: `_find_string_slot()`
private func findStringSlot(length: Int, searchStart: Int = 0x14000) -> Int? {
    let raw = buffer.original
    var cursor = searchStart
    while cursor < raw.count {
        guard raw[cursor] == 0 else {
            cursor += 1
            continue
        }
        // Measure the extent of this NUL run.
        let runStart = cursor
        while cursor < raw.count, raw[cursor] == 0 {
            cursor += 1
        }
        guard cursor - runStart >= 64 else { continue }
        // Align the write pointer to 16 bytes (Python: (run_start + 8 + 15) & ~15)
        let aligned = (runStart + 8 + 15) & ~15
        // The string must fit entirely inside the run.
        if aligned + length <= cursor {
            return aligned
        }
    }
    return nil
}
// MARK: - 4. Rootfs Bypass (LLB only)
/// Apply all five rootfs bypass patches.
/// Python: `patch_rootfs_bypass()`
///
/// Each sub-patch locates its own anchor and fails independently (with a
/// verbose log), so a miss in one does not prevent the others.
/// NOTE(review): "Rootfss" looks like a typo for "Rootfs", but the name is
/// kept as-is because callers outside this file may reference it.
func patchRootfssBypass() {
    // 4a: cbz/cbnz before error code 0x3B7 → unconditional b
    patchCbzBeforeError(errorCode: 0x3B7, description: "rootfs: skip sig check (0x3B7)")
    // 4b: NOP the b.hs after cmp x8, #0x400
    patchBhsAfterCmp0x400()
    // 4c: cbz/cbnz before error code 0x3C2 → unconditional b
    patchCbzBeforeError(errorCode: 0x3C2, description: "rootfs: skip sig verify (0x3C2)")
    // 4d: NOP cbz x8 null check (ldr x8, [xN, #0x78])
    patchNullCheck0x78()
    // 4e: cbz/cbnz before error code 0x110 → unconditional b
    patchCbzBeforeError(errorCode: 0x110, description: "rootfs: skip size verify (0x110)")
}
/// Find unique `mov w8, #<errorCode>` and convert the cbz/cbnz 4 bytes before
/// it into an unconditional branch to the same target.
/// Python: `_patch_cbz_before_error()`
///
/// - Parameters:
///   - errorCode: Immediate loaded into w8 at the error site; the encoding
///     must occur exactly once in the binary for the patch to apply.
///   - description: Human-readable text recorded with the emitted patch.
private func patchCbzBeforeError(errorCode: UInt32, description: String) {
    let pattern = encodedMovW8(errorCode)
    let locs = findPattern(pattern)
    // The mov must be unique, otherwise we cannot tell which site to patch.
    guard locs.count == 1 else {
        if verbose {
            print(" [-] \(description): expected 1 'mov w8, #0x\(String(errorCode, radix: 16))', found \(locs.count)")
        }
        return
    }
    let errOff = locs[0]
    // The conditional branch sits one instruction (4 bytes) before the mov.
    let cbzOff = errOff - 4
    guard let insn = disasm.disassembleOne(in: buffer.original, at: cbzOff) else {
        if verbose { print(" [-] \(description): no instruction at 0x\(String(format: "%X", cbzOff))") }
        return
    }
    guard insn.mnemonic == "cbz" || insn.mnemonic == "cbnz" else {
        if verbose { print(" [-] \(description): expected cbz/cbnz at 0x\(String(format: "%X", cbzOff)), got \(insn.mnemonic)") }
        return
    }
    // Extract the branch target from the decoded operands; for cbz/cbnz,
    // operand 0 is the tested register and operand 1 the immediate target.
    guard let detail = insn.aarch64, detail.operands.count >= 2 else { return }
    let target = Int(detail.operands[1].imm)
    // Re-encode as an unconditional B to the same target (branch taken either way).
    guard let bInsn = ARM64Encoder.encodeB(from: cbzOff, to: target) else {
        if verbose { print(" [-] \(description): B encoding out of range") }
        return
    }
    emit(cbzOff, bInsn, id: "\(component).rootfs_cbz_0x\(String(errorCode, radix: 16))", description: description)
}
/// Find the unique `cmp x8, #0x400` and NOP the `b.hs` that follows.
/// Python: `_patch_bhs_after_cmp_0x400()`
private func patchBhsAfterCmp0x400() {
    // Scan every instruction for cmp x8, #0x400 — avoids hand-encoding the
    // CMP/SUBS form and stays robust across Capstone output variants.
    let cmpSites: [Int] = chunkedDisasm().flatMap { chunk in
        chunk.compactMap { insn -> Int? in
            guard insn.mnemonic == "cmp", insn.operandString == "x8, #0x400" else { return nil }
            return Int(insn.address)
        }
    }
    guard cmpSites.count == 1 else {
        if verbose { print(" [-] rootfs b.hs: expected 1 'cmp x8, #0x400', found \(cmpSites.count)") }
        return
    }
    // The conditional branch sits directly after the compare.
    let bhsOff = cmpSites[0] + 4
    guard let branch = disasm.disassembleOne(in: buffer.original, at: bhsOff) else {
        if verbose { print(" [-] rootfs b.hs: no instruction at 0x\(String(format: "%X", bhsOff))") }
        return
    }
    guard branch.mnemonic == "b.hs" else {
        if verbose { print(" [-] rootfs b.hs: expected b.hs at 0x\(String(format: "%X", bhsOff)), got \(branch.mnemonic)") }
        return
    }
    emit(bhsOff, ARM64.nop, id: "\(component).rootfs_bhs_0x400", description: "rootfs: NOP b.hs size check (0x400)")
}
/// Find `ldr xR, [xN, #0x78]; cbz xR` preceding the unique `mov w8, #0x110`
/// and NOP the cbz.
/// Python: `_patch_null_check_0x78()`
private func patchNullCheck0x78() {
    let movSites = findPattern(encodedMovW8(0x110))
    guard movSites.count == 1 else {
        if verbose { print(" [-] rootfs null check: expected 1 'mov w8, #0x110', found \(movSites.count)") }
        return
    }
    // Walk backwards (at most 0x300 bytes) from the error mov looking for an
    // ldr with a #0x78 displacement immediately followed by a cbz on an x register.
    let errOff = movSites[0]
    let lowerBound = max(errOff - 0x300, 0)
    var cursor = errOff - 4
    while cursor >= lowerBound {
        defer { cursor -= 4 }
        guard let load = disasm.disassembleOne(in: buffer.original, at: cursor),
              let check = disasm.disassembleOne(in: buffer.original, at: cursor + 4)
        else { continue }
        guard load.mnemonic == "ldr",
              load.operandString.contains("#0x78"),
              check.mnemonic == "cbz",
              check.operandString.hasPrefix("x")
        else { continue }
        emit(cursor + 4, ARM64.nop, id: "\(component).rootfs_null_check_0x78",
             description: "rootfs: NOP cbz x8 null check (#0x78)")
        return
    }
    if verbose { print(" [-] rootfs null check: ldr+cbz #0x78 pattern not found") }
}
// MARK: - 5. Panic Bypass (LLB only)
/// Find `mov w8, #0x328; movk w8, #0x40, lsl #16; ...; bl X; cbnz w0`
/// and NOP the cbnz.
/// Python: `patch_panic_bypass()`
///
/// The mov/movk pair materialises the 32-bit constant 0x400328 in w8; the
/// first bl after it whose result is tested with cbnz is treated as the
/// panic gate, and NOPing the cbnz bypasses it.
func patchPanicBypass() {
    let mov328 = encodedMovW8(0x328)
    let locs = findPattern(mov328)
    for loc in locs {
        // Verify movk w8, #0x40, lsl #16 follows — confirms this mov is the
        // low half of the 0x400328 constant and not an unrelated #0x328.
        guard let nextInsn = disasm.disassembleOne(in: buffer.original, at: loc + 4) else { continue }
        guard nextInsn.mnemonic == "movk",
              nextInsn.operandString.contains("w8"),
              nextInsn.operandString.contains("#0x40"),
              nextInsn.operandString.contains("lsl #16") else { continue }
        // Walk forward (up to 6 instructions past the movk: offsets loc+8
        // through loc+28) to find a bl followed by a cbnz.
        var step = loc + 8
        while step < loc + 32 {
            guard let i = disasm.disassembleOne(in: buffer.original, at: step) else {
                step += 4
                continue
            }
            if i.mnemonic == "bl" {
                // The instruction right after the call must test its result.
                if let ni = disasm.disassembleOne(in: buffer.original, at: step + 4),
                   ni.mnemonic == "cbnz"
                {
                    emit(step + 4, ARM64.nop,
                         id: "\(component).panic_bypass",
                         description: "panic bypass: NOP cbnz w0")
                    return
                }
                break // bl found but no cbnz — keep scanning other mov candidates
            }
            step += 4
        }
    }
    if verbose { print(" [-] panic bypass: pattern not found") }
}
}

View File

@@ -0,0 +1,110 @@
// KernelJBPatchAmfiExecve.swift JB kernel patch: AMFI execve kill path bypass (disabled)
//
// Python source: scripts/patchers/kernel_jb_patch_amfi_execve.py
//
// Strategy: All kill paths in the AMFI execve hook converge on a shared
// epilogue that does `MOV W0, #1` (kill) then returns. Changing that single
// instruction to `MOV W0, #0` (allow) converts every kill path to a success
// return without touching the rest of the function.
//
// NOTE: This patch is disabled in the Python reference (not called from the
// main dispatcher). It is implemented here for completeness but is NOT called
// from patchAmfiExecveKillPath() in the orchestrator.
import Foundation
extension KernelJBPatcher {
    // MARK: - AMFI execve kill-path bypass (disabled)

    /// Bypass AMFI execve kill by patching the shared MOV W0,#1 → MOV W0,#0.
    ///
    /// All kill paths in the AMFI execve hook function converge on a shared
    /// epilogue: `MOV W0, #1; LDP X29, X30, [SP, #imm]; ...`. Patching the
    /// single MOV converts all kill paths to allow-returns.
    ///
    /// This function is implemented but intentionally NOT called from the
    /// main Group C dispatcher (matches Python behaviour where it is disabled).
    ///
    /// - Returns: `true` if the shared kill return was found and patched.
    @discardableResult
    func patchAmfiExecveKillPath() -> Bool {
        log("\n[JB] AMFI execve kill path: shared MOV W0,#1 → MOV W0,#0")
        // Find "AMFI: hook..execve() killing" or the shorter fallback string.
        // FIX: resolve the offset once instead of searching twice (the original
        // called findString a second time with the same needle, leaving an
        // unreachable duplicate error branch).
        guard let strOff = buffer.findString("AMFI: hook..execve() killing")
            ?? buffer.findString("execve() killing")
        else {
            log(" [-] execve kill log string not found")
            return false
        }
        // Collect refs in kern_text, fall back to all refs.
        var refs: [(adrpOff: Int, addOff: Int)] = []
        if let (ks, ke) = kernTextRange {
            refs = findStringRefs(strOff, in: (start: ks, end: ke))
        }
        if refs.isEmpty {
            refs = findStringRefs(strOff)
        }
        guard !refs.isEmpty else {
            log(" [-] no refs to execve kill log string")
            return false
        }
        let movW0_1_enc: UInt32 = 0x5280_0020 // MOV W0, #1 (MOVZ W0, #1)
        var patched = false
        var seenFuncs: Set<Int> = []
        for (adrpOff, _) in refs {
            guard let funcStart = findFunctionStart(adrpOff) else { continue }
            // Each containing function is examined at most once.
            guard seenFuncs.insert(funcStart).inserted else { continue }
            // Function end = next PACIBSP (capped at 0x800 bytes).
            var funcEnd = findFuncEnd(funcStart, maxSize: 0x800)
            if let (_, ke) = kernTextRange { funcEnd = min(funcEnd, ke) }
            // Scan backward from funcEnd for MOV W0, #1 immediately followed
            // by LDP X29, X30, [SP, #imm] (the epilogue entry).
            var targetOff = -1
            var off = funcEnd - 8
            while off >= funcStart {
                if buffer.readU32(at: off) == movW0_1_enc {
                    // Verify next instruction is LDP X29, X30 (epilogue start)
                    if let nextInsn = disasAt(off + 4),
                       nextInsn.mnemonic == "ldp",
                       nextInsn.operandString.contains("x29"), nextInsn.operandString.contains("x30")
                    {
                        targetOff = off
                        break
                    }
                }
                off -= 4
            }
            guard targetOff >= 0 else {
                log(" [-] MOV W0,#1 + epilogue not found in func 0x\(String(format: "%X", funcStart))")
                continue
            }
            emit(targetOff, ARM64.movW0_0,
                 patchID: "jb.amfi_execve.kill_return",
                 description: "mov w0,#0 [AMFI kill return → allow]")
            log(" [+] Patched kill return at 0x\(String(format: "%X", targetOff)) (func 0x\(String(format: "%X", funcStart)))")
            patched = true
            break // One function is sufficient
        }
        if !patched {
            log(" [-] AMFI execve kill return not found")
        }
        return patched
    }
}

View File

@@ -0,0 +1,139 @@
// KernelJBPatchAmfiTrustcache.swift JB kernel patch: AMFI trustcache gate bypass
//
// Python source: scripts/patchers/kernel_jb_patch_amfi_trustcache.py
//
// Strategy (semantic function matching):
// Scan amfi_text for functions (PACIBSP boundaries) that match the
// AMFIIsCDHashInTrustCache body shape:
// 1. mov x19, x2 (save x2 into x19)
// 2. stp xzr, xzr, [sp, ...] (stack-zeroing pair)
// 3. mov x2, sp (pass stack slot as out-param)
// 4. bl <lookup>
// 5. mov x20, x0 (save result)
// 6. cbnz w0, ... (fast-path already-trusted check)
// 7. cbz x19, ... (nil out-param guard)
// Exactly one function must match. Rewrite its first 4 instructions with
// the always-allow stub: mov x0,#1 / cbz x2,+8 / str x0,[x2] / ret.
import Foundation
extension KernelJBPatcher {
    /// AMFI trustcache gate bypass: rewrite AMFIIsCDHashInTrustCache to always return 1.
    ///
    /// Scans the AMFI text range for PACIBSP-delimited functions whose body
    /// matches the AMFIIsCDHashInTrustCache shape (see the per-step comments
    /// below); exactly one function must match. Its first 4 instructions are
    /// then replaced with the always-allow stub:
    /// `mov x0,#1 / cbz x2,+8 / str x0,[x2] / ret`.
    ///
    /// - Returns: `true` if exactly one match was found and patched.
    @discardableResult
    func patchAmfiCdhashInTrustcache() -> Bool {
        log("\n[JB] AMFIIsCDHashInTrustCache: always allow + store flag")
        // Determine the AMFI text range. Fall back to full __TEXT_EXEC if no kext split.
        let amfiRange = amfiTextRange()
        let (amfiStart, amfiEnd) = amfiRange
        // Instruction encoding constants (used for structural matching).
        // Derived semantically — no hardcoded offsets, only instruction shape.
        let movX19X2: UInt32 = 0xAA02_03F3 // mov x19, x2 (ORR X19, XZR, X2)
        let movX2Sp: UInt32 = 0x9100_03E2 // mov x2, sp (ADD X2, SP, #0)
        // Mask for STP XZR,XZR,[SP,#imm]: fixed bits excluding the immediate
        // (Rt/Rn/Rt2 all forced to 31, i.e. XZR/SP).
        let stpXzrXzrMask: UInt32 = 0xFFC0_7FFF
        let stpXzrXzrVal: UInt32 = 0xA900_7FFF // any [sp, #imm_scaled]
        // CBZ/CBNZ masks.
        // NOTE(review): cbnzWMask drops bit 31 (the sf bit), so it matches the
        // 64-bit CBNZ X form as well as the 32-bit form — confirm this widening
        // is intentional (it mirrors the Python reference).
        let cbnzWMask: UInt32 = 0x7F00_0000
        let cbnzWVal: UInt32 = 0x3500_0000 // CBNZ 32-bit
        let cbzXMask: UInt32 = 0xFF00_0000
        let cbzXVal: UInt32 = 0xB400_0000 // CBZ 64-bit
        // BL mask (top 6 bits).
        let blMask: UInt32 = 0xFC00_0000
        let blVal: UInt32 = 0x9400_0000
        var hits: [Int] = []
        var off = amfiStart
        while off < amfiEnd - 4 {
            // Functions are assumed to begin at a PACIBSP instruction.
            guard buffer.readU32(at: off) == ARM64.pacibspU32 else {
                off += 4
                continue
            }
            let funcStart = off
            // Determine function end: next PACIBSP or a 0x200-byte limit.
            var funcEnd = min(funcStart + 0x200, amfiEnd)
            var probe = funcStart + 4
            while probe < funcEnd {
                if buffer.readU32(at: probe) == ARM64.pacibspU32 {
                    funcEnd = probe
                    break
                }
                probe += 4
            }
            // Collect the raw instruction words of this function.
            var insns: [UInt32] = []
            var p = funcStart
            while p < funcEnd {
                insns.append(buffer.readU32(at: p))
                p += 4
            }
            // Structural shape check — mirrors the Python _find_after sequence.
            // The slice-based firstIndex calls preserve absolute indices, so
            // each step searches strictly after the previous hit:
            // i1: mov x19, x2
            // i2: stp xzr, xzr, [sp, ...]
            // i3: mov x2, sp
            // i4: bl <anything>
            // i5: mov x20, x0 (ORR X20, XZR, X0 = 0xAA0003F4)
            // i6: cbnz w0, ... (tested register field must be 0 → w0)
            // i7: cbz x19, ... (tested register field must be 19 → x19)
            let movX20X0: UInt32 = 0xAA00_03F4
            guard let i1 = insns.firstIndex(where: { $0 == movX19X2 }) else {
                off = funcEnd
                continue
            }
            guard let i2 = insns[(i1 + 1)...].firstIndex(where: { ($0 & stpXzrXzrMask) == stpXzrXzrVal }) else {
                off = funcEnd
                continue
            }
            guard let i3 = insns[(i2 + 1)...].firstIndex(where: { $0 == movX2Sp }) else {
                off = funcEnd
                continue
            }
            guard let i4 = insns[(i3 + 1)...].firstIndex(where: { ($0 & blMask) == blVal }) else {
                off = funcEnd
                continue
            }
            guard let i5 = insns[(i4 + 1)...].firstIndex(where: { $0 == movX20X0 }) else {
                off = funcEnd
                continue
            }
            guard insns[(i5 + 1)...].first(where: { ($0 & cbnzWMask) == cbnzWVal && ($0 & 0x1F) == 0 }) != nil else {
                off = funcEnd
                continue
            }
            guard insns[(i5 + 1)...].first(where: { ($0 & cbzXMask) == cbzXVal && ($0 & 0x1F) == 19 }) != nil else {
                off = funcEnd
                continue
            }
            hits.append(funcStart)
            off = funcEnd
        }
        // Exactly one match required — anything else is too risky to patch.
        guard hits.count == 1 else {
            log(" [-] expected 1 AMFI trustcache body hit, found \(hits.count)")
            return false
        }
        let funcStart = hits[0]
        // Record virtual addresses for each of the 4 stub instructions.
        let va0 = fileOffsetToVA(funcStart)
        let va1 = fileOffsetToVA(funcStart + 4)
        let va2 = fileOffsetToVA(funcStart + 8)
        let va3 = fileOffsetToVA(funcStart + 12)
        emit(funcStart, ARM64.movX0_1, patchID: "amfi_trustcache_1", virtualAddress: va0, description: "mov x0,#1 [AMFIIsCDHashInTrustCache]")
        emit(funcStart + 4, ARM64.cbzX2_8, patchID: "amfi_trustcache_2", virtualAddress: va1, description: "cbz x2,+8 [AMFIIsCDHashInTrustCache]")
        emit(funcStart + 8, ARM64.strX0X2, patchID: "amfi_trustcache_3", virtualAddress: va2, description: "str x0,[x2] [AMFIIsCDHashInTrustCache]")
        emit(funcStart + 12, ARM64.ret, patchID: "amfi_trustcache_4", virtualAddress: va3, description: "ret [AMFIIsCDHashInTrustCache]")
        return true
    }
}

View File

@@ -0,0 +1,188 @@
// KernelJBPatchBsdInitAuth.swift JB: bypass FSIOC_KERNEL_ROOTAUTH failure in _bsd_init.
//
// Python source: scripts/patchers/kernel_jb_patch_bsd_init_auth.py
//
// GUARDRAIL (CLAUDE.md): recover _bsd_init locate rootvp panic block
// find unique in-function call cbnz w0/x0, panic bl imageboot_needed patch gate.
//
// Reveal procedure:
// 1. Recover _bsd_init via symbol table, else via rootvp panic string anchor.
// 2. Inside _bsd_init, find "rootvp not authenticated after mounting" string ref.
// 3. Follow ADRP find the BL to _panic immediately after the ADD.
// 4. Scan backward from the panic ref for `cbnz w0/x0, <panic_region>` preceded by a BL,
// with a BL to _imageboot_needed (or any BL) in the next 3 instructions.
// 5. NOP that cbnz.
import Capstone
import Foundation
extension KernelJBPatcher {
    // Panic string anchoring _bsd_init's rootvp-auth failure block.
    private static let rootvpAuthNeedle = "rootvp not authenticated after mounting"
    // Verbose variant (with file/line suffix) used as a fallback anchor.
    private static let rootvpAltNeedle = "rootvp not authenticated after mounting @%s:%d"

    /// Bypass the real rootvp auth failure branch inside _bsd_init.
    ///
    /// Locates _bsd_init, finds the rootvp panic string reference and the BL
    /// to _panic near it, then scans backward for the CBNZ gate (preceded by
    /// a BL, followed within 3 insns by a BL to _imageboot_needed) whose
    /// target falls inside the panic region, and NOPs it.
    ///
    /// - Returns: `true` if the gate was patched or was already bypassed.
    @discardableResult
    func patchBsdInitAuth() -> Bool {
        log("\n[JB] _bsd_init: ignore FSIOC_KERNEL_ROOTAUTH failure")
        // Step 1: Recover _bsd_init function start.
        guard let funcStart = resolveBsdInit() else {
            log(" [-] _bsd_init not found")
            return false
        }
        // Step 2: Find the panic string ref inside this function.
        guard let (adrpOff, addOff) = rootvpPanicRefInFunc(funcStart) else {
            log(" [-] rootvp panic string ref not found in _bsd_init")
            return false
        }
        // Step 3: Find the BL to _panic near the ADD instruction.
        guard let blPanicOff = findPanicCallNear(addOff) else {
            log(" [-] BL _panic not found near rootvp panic string")
            return false
        }
        // Step 4: Scan backward from the ADRP for a valid cbnz gate site.
        // The gate's branch target must land in [errLo, errHi] around the panic BL.
        let errLo = blPanicOff - 0x40
        let errHi = blPanicOff + 4
        let imagebootNeeded = resolveSymbol("_imageboot_needed")
        let scanStart = max(funcStart, adrpOff - 0x400)
        var candidates: [(off: Int, state: String)] = []
        for off in stride(from: scanStart, to: adrpOff, by: 4) {
            guard let state = matchRootauthBranchSite(off, errLo: errLo, errHi: errHi, imagebootNeeded: imagebootNeeded) else { continue }
            candidates.append((off, state))
        }
        guard !candidates.isEmpty else {
            log(" [-] rootauth branch site not found")
            return false
        }
        let (branchOff, state): (Int, String)
        if candidates.count == 1 {
            (branchOff, state) = (candidates[0].off, candidates[0].state)
        } else {
            // If multiple, prefer the "live" (not already patched) one.
            let live = candidates.filter { $0.state == "live" }
            guard live.count == 1 else {
                log(" [-] ambiguous rootauth branch sites: \(candidates.count) found")
                return false
            }
            (branchOff, state) = (live[0].off, live[0].state)
        }
        // Idempotence: a previous run may already have NOPed the gate.
        if state == "patched" {
            log(" [=] rootauth branch already bypassed at 0x\(String(format: "%X", branchOff))")
            return true
        }
        emit(branchOff, ARM64.nop,
             patchID: "jb.bsd_init_auth.nop_cbnz",
             virtualAddress: fileOffsetToVA(branchOff),
             description: "NOP cbnz (rootvp auth) [_bsd_init]")
        return true
    }

    // MARK: - Private helpers

    /// Resolve _bsd_init via symbol table, else via rootvp anchor string.
    private func resolveBsdInit() -> Int? {
        if let off = resolveSymbol("_bsd_init"), off >= 0 {
            return off
        }
        // Fallback (stripped kernel): find the function containing the
        // rootvp panic string reference, trying the verbose variant first.
        for needle in [Self.rootvpAltNeedle, Self.rootvpAuthNeedle] {
            if let strOff = buffer.findString(needle) {
                let refs = findStringRefs(strOff)
                if let firstRef = refs.first,
                   let fn = findFunctionStart(firstRef.adrpOff)
                {
                    return fn
                }
            }
        }
        return nil
    }

    /// Find the ADRP+ADD pair for the rootvp panic string inside `funcStart`.
    ///
    /// - Returns: The first string reference whose containing function is
    ///   exactly `funcStart`, or `nil`.
    private func rootvpPanicRefInFunc(_ funcStart: Int) -> (adrpOff: Int, addOff: Int)? {
        guard let strOff = buffer.findString(Self.rootvpAuthNeedle) else { return nil }
        let refs = findStringRefs(strOff)
        for (adrpOff, addOff) in refs {
            if let fn = findFunctionStart(adrpOff), fn == funcStart {
                return (adrpOff, addOff)
            }
        }
        return nil
    }

    /// Find the BL to _panic within 0x40 bytes after `addOff`.
    ///
    /// NOTE(review): the scan starts at `addOff` itself rather than the next
    /// instruction; harmless since an ADD never decodes as a BL, but confirm
    /// against the Python reference.
    private func findPanicCallNear(_ addOff: Int) -> Int? {
        let limit = min(addOff + 0x40, buffer.count)
        for scan in stride(from: addOff, to: limit, by: 4) {
            if let target = jbDecodeBL(at: scan),
               let panicOff = panicOffset,
               target == panicOff
            {
                return scan
            }
        }
        return nil
    }

    /// Check if instruction at `off` is the rootauth CBNZ gate site.
    /// Returns "live" (needs patching), "patched" (already NOPed), or nil.
    private func matchRootauthBranchSite(_ off: Int, errLo: Int, errHi: Int, imagebootNeeded: Int?) -> String? {
        let insns = disasm.disassemble(in: buffer.data, at: off, count: 1)
        guard let insn = insns.first else { return nil }
        // Must be preceded by a BL or BLR (the ioctl whose result is tested).
        guard isBLorBLR(at: off - 4) else { return nil }
        // Must have a BL to _imageboot_needed (or any BL if symbol not resolved) within 3 insns after
        guard hasImagebootCallNear(off, imagebootNeeded: imagebootNeeded) else { return nil }
        // Check if already patched (NOP) — reported before the cbnz checks so
        // a previously bypassed gate is still recognised as a site.
        if insn.mnemonic == "nop" { return "patched" }
        // Must be CBNZ on w0 or x0
        guard insn.mnemonic == "cbnz" else { return nil }
        guard let detail = insn.aarch64, !detail.operands.isEmpty else { return nil }
        let regOp = detail.operands[0]
        guard regOp.type == AARCH64_OP_REG,
              regOp.reg == AARCH64_REG_W0 || regOp.reg == AARCH64_REG_X0 else { return nil }
        // Branch target must point into the panic block region
        guard let (branchTarget, _) = jbDecodeBranchTarget(at: off),
              branchTarget >= errLo, branchTarget <= errHi else { return nil }
        return "live"
    }

    /// Return true if there is a BL/BLR/BLRAA/BLRAB/etc. at `off`.
    /// (Matched by mnemonic prefix "bl", which covers all call forms.)
    private func isBLorBLR(at off: Int) -> Bool {
        guard off >= 0, off + 4 <= buffer.count else { return false }
        let insns = disasm.disassemble(in: buffer.data, at: off, count: 1)
        guard let insn = insns.first else { return false }
        return insn.mnemonic.hasPrefix("bl")
    }

    /// Return true if there is a BL to _imageboot_needed (or any BL if unknown)
    /// within 3 instructions after `off`.
    private func hasImagebootCallNear(_ off: Int, imagebootNeeded: Int?) -> Bool {
        // 0x18 bytes = up to 3 instructions starting after `off`.
        let limit = min(off + 0x18, buffer.count)
        for scan in stride(from: off + 4, to: limit, by: 4) {
            guard let target = jbDecodeBL(at: scan) else { continue }
            // If we know _imageboot_needed, require an exact match;
            // otherwise any BL counts (stripped kernel).
            if let ib = imagebootNeeded {
                if target == ib { return true }
            } else {
                return true
            }
        }
        return false
    }
}

View File

@@ -0,0 +1,348 @@
// KernelJBPatchCredLabel.swift JB kernel patch: _cred_label_update_execve C21-v3
//
// Python source: scripts/patchers/kernel_jb_patch_cred_label.py
//
// Strategy (C21-v3): Split late exits, add helper bits on success.
// - Keep _cred_label_update_execve body intact.
// - Redirect the shared deny return (MOV W0,#1 just before epilogue) to a
// deny cave that forces W0=0 and returns through the original epilogue.
// - Redirect late success exits (B epilogue preceded by MOV W0,#0) to a
// success cave that reloads x26 = u_int *csflags, clears kill bits, ORs
// CS_GET_TASK_ALLOW|CS_INSTALLER, forces W0=0, then returns via epilogue.
//
// CS mask constants (matching Python):
// RELAX_CSMASK = 0xFFFFC0FF (clears CS_HARD|CS_KILL|CS_RESTRICT etc.)
// RELAX_SETMASK = 0x0000000C (CS_GET_TASK_ALLOW | CS_INSTALLER)
import Foundation
extension KernelJBPatcher {
// MARK: - Constants
// Accepted function-return encodings.
// NOTE(review): presumably 0xD65F_0FFF = RETAB, 0xD65F_0BFF = RETAA,
// 0xD65F_03C0 = RET — confirm against the ARM ARM A64 encodings.
private static let retInsns: Set<UInt32> = [0xD65F_0FFF, 0xD65F_0BFF, 0xD65F_03C0]
// MOVZ W0, #0 — "allow" return value.
private static let movW0_0_u32: UInt32 = 0x5280_0000
// MOVZ W0, #1 — "deny" return value.
private static let movW0_1_u32: UInt32 = 0x5280_0020
// AND keep-mask: clears CS_HARD|CS_KILL|CS_RESTRICT etc. (matches Python RELAX_CSMASK).
private static let relaxCSMask: UInt32 = 0xFFFF_C0FF
// ORR set-mask: CS_GET_TASK_ALLOW | CS_INSTALLER (matches Python RELAX_SETMASK).
private static let relaxSetMask: UInt32 = 0x0000_000C
// MARK: - Entry Point
/// C21-v3 split exits + helper bits for _cred_label_update_execve.
///
/// Keeps the function body intact and redirects its late exits:
/// the shared deny return (`mov w0,#1` before the epilogue) goes through a
/// 2-instruction deny cave that forces W0=0; each late success exit goes
/// through an 8-instruction success cave that reloads the csflags pointer,
/// relaxes/sets CS flag bits, forces W0=0, and returns via the epilogue.
func patchCredLabelUpdateExecve() {
    log("\n[JB] _cred_label_update_execve: C21-v3 split exits + helper bits")
    // 1. Locate the function.
    guard let funcOff = locateCredLabelExecveFunc() else {
        log(" [-] function not found, skipping shellcode patch")
        return
    }
    log(" [+] func at 0x\(String(format: "%X", funcOff))")
    // 2. Find canonical epilogue: last `ldp x29, x30, [sp, ...]` before ret.
    guard let epilogueOff = findCredLabelEpilogue(funcOff: funcOff) else {
        log(" [-] epilogue not found")
        return
    }
    log(" [+] epilogue at 0x\(String(format: "%X", epilogueOff))")
    // 3. Find shared deny return: MOV W0,#1 immediately before the epilogue.
    let denyOff = findCredLabelDenyReturn(funcOff: funcOff, epilogueOff: epilogueOff)
    // Check whether the deny site is already MOV W0,#0.
    // NOTE(review): findCredLabelDenyReturn only returns an offset whose word
    // equals MOV W0,#1, so denyAlreadyAllowed can seemingly never be true
    // here — confirm whether readU32 is meant to observe pending patches.
    let denyAlreadyAllowed: Bool
    if let denyOff {
        denyAlreadyAllowed = buffer.readU32(at: denyOff) == Self.movW0_0_u32
        if denyAlreadyAllowed {
            log(" [=] deny return at 0x\(String(format: "%X", denyOff)) already MOV W0,#0, skipping deny trampoline")
        }
    } else {
        log(" [-] shared deny return not found")
        return
    }
    // 4. Find success exits: B epilogue with preceding MOV W0,#0.
    let successExits = findCredLabelSuccessExits(funcOff: funcOff, epilogueOff: epilogueOff)
    guard !successExits.isEmpty else {
        log(" [-] success exits not found")
        return
    }
    // 5. Recover csflags stack reload instruction bytes.
    // (csflagsDesc is currently unused beyond destructuring.)
    guard let (csflagsInsn, csflagsDesc) = findCredLabelCSFlagsReload(funcOff: funcOff) else {
        log(" [-] csflags stack reload (ldr x26, [x29, #imm]) not found")
        return
    }
    // 6. Allocate code caves.
    var denyCaveOff: Int? = nil
    if !denyAlreadyAllowed {
        // Deny cave: 2 instructions = 8 bytes.
        denyCaveOff = findCodeCave(size: 8)
        guard denyCaveOff != nil else {
            log(" [-] no code cave for C21-v3 deny trampoline")
            return
        }
    }
    // Success cave: 8 instructions = 32 bytes; must differ from the deny cave.
    guard let successCaveOff = findCodeCave(size: 32),
          successCaveOff != denyCaveOff
    else {
        log(" [-] no code cave for C21-v3 success trampoline")
        return
    }
    // 7. Build deny shellcode (8 bytes): MOV W0,#0 + B epilogue.
    if !denyAlreadyAllowed, let dOff = denyOff, let dCaveOff = denyCaveOff {
        guard let branchBack = encodeB(from: dCaveOff + 4, to: epilogueOff) else {
            log(" [-] deny trampoline → epilogue branch out of range")
            return
        }
        let denyShellcode = ARM64.movW0_0 + branchBack
        // Write deny cave, one 4-byte word per emitted patch record.
        for i in stride(from: 0, to: denyShellcode.count, by: 4) {
            let chunk = denyShellcode[denyShellcode.index(denyShellcode.startIndex, offsetBy: i) ..< denyShellcode.index(denyShellcode.startIndex, offsetBy: i + 4)]
            emit(dCaveOff + i, Data(chunk),
                 patchID: "jb.cred_label_update_execve.deny_cave",
                 description: "deny_trampoline+\(i) [_cred_label_update_execve C21-v3]")
        }
        // Redirect deny site → deny cave.
        guard let branchToCave = encodeB(from: dOff, to: dCaveOff) else {
            log(" [-] branch from deny site 0x\(String(format: "%X", dOff)) to cave out of range")
            return
        }
        emit(dOff, branchToCave,
             patchID: "jb.cred_label_update_execve.deny_redirect",
             description: "b deny cave [_cred_label_update_execve C21-v3 exit @ 0x\(String(format: "%X", dOff))]")
    }
    // 8. Build success shellcode (8 instrs = 32 bytes):
    //    ldr x26, [x29, #imm]   (reload csflags ptr from stack)
    //    cbz x26, #0x10         (skip the 4 flag instructions if null)
    //    ldr w8, [x26]
    //    and w8, w8, #relaxCSMask
    //    orr w8, w8, #relaxSetMask
    //    str w8, [x26]
    //    mov w0, #0
    //    b epilogue
    guard let successBranchBack = encodeB(from: successCaveOff + 28, to: epilogueOff) else {
        log(" [-] success trampoline → epilogue branch out of range")
        return
    }
    var successShellcode = Data()
    successShellcode += csflagsInsn // ldr x26, [x29, #imm]
    successShellcode += encodeCBZ_X26_skip16() // cbz x26, #0x10 (skip 4 insns)
    successShellcode += encodeLDR_W8_X26() // ldr w8, [x26]
    successShellcode += encodeAND_W8_W8_mask(Self.relaxCSMask) // and w8, w8, #0xFFFFC0FF
    successShellcode += encodeORR_W8_W8_imm(Self.relaxSetMask) // orr w8, w8, #0xC
    successShellcode += encodeSTR_W8_X26() // str w8, [x26]
    successShellcode += ARM64.movW0_0 // mov w0, #0
    successShellcode += successBranchBack // b epilogue
    // Sanity: the branch-back displacement above assumed a 32-byte cave.
    guard successShellcode.count == 32 else {
        log(" [-] success shellcode size mismatch: \(successShellcode.count) != 32")
        return
    }
    for i in stride(from: 0, to: successShellcode.count, by: 4) {
        let chunk = successShellcode[successShellcode.index(successShellcode.startIndex, offsetBy: i) ..< successShellcode.index(successShellcode.startIndex, offsetBy: i + 4)]
        emit(successCaveOff + i, Data(chunk),
             patchID: "jb.cred_label_update_execve.success_cave",
             description: "success_trampoline+\(i) [_cred_label_update_execve C21-v3]")
    }
    // 9. Redirect every success exit → success cave.
    for exitOff in successExits {
        guard let branchToCave = encodeB(from: exitOff, to: successCaveOff) else {
            log(" [-] branch from success exit 0x\(String(format: "%X", exitOff)) to cave out of range")
            return
        }
        emit(exitOff, branchToCave,
             patchID: "jb.cred_label_update_execve.success_redirect",
             description: "b success cave [_cred_label_update_execve C21-v3 exit @ 0x\(String(format: "%X", exitOff))]")
    }
}
// MARK: - Function Locators
/// Locate _cred_label_update_execve: try the symbol table first, then the
/// string-cluster scan fallback.
private func locateCredLabelExecveFunc() -> Int? {
    // Prefer a symbol-table hit whose body shape also validates.
    for (name, candidateOff) in symbols
        where name.contains("cred_label_update_execve") && !name.contains("hook")
    {
        if isCredLabelExecveCandidate(funcOff: candidateOff) {
            return candidateOff
        }
    }
    // Stripped kernel: fall back to anchor-string clustering.
    return findCredLabelExecveByStrings()
}
/// Validate candidate function shape for _cred_label_update_execve.
private func isCredLabelExecveCandidate(funcOff: Int) -> Bool {
    // The real function is large (>= 0x200 bytes)...
    let end = findFuncEnd(funcOff, maxSize: 0x1000)
    if end - funcOff < 0x200 { return false }
    // ...and must reload the csflags pointer from the stack frame.
    return findCredLabelCSFlagsReload(funcOff: funcOff) != nil
}
/// String-cluster search for _cred_label_update_execve.
private func findCredLabelExecveByStrings() -> Int? {
    let anchorStrings = [
        "AMFI: hook..execve() killing",
        "Attempt to execute completely unsigned code",
        "Attempt to execute a Legacy VPN Plugin",
        "dyld signature cannot be verified",
    ]
    // Gather every function that references at least one anchor string.
    var candidateFuncs: Set<Int> = []
    for needle in anchorStrings {
        guard let strOff = buffer.findString(needle) else { continue }
        for (adrpOff, _) in findStringRefs(strOff) {
            if let start = findFunctionStart(adrpOff) {
                candidateFuncs.insert(start)
            }
        }
    }
    // Pick the largest validated candidate (size as a proxy for body complexity).
    var best: (off: Int, size: Int)? = nil
    for candidate in candidateFuncs {
        let size = findFuncEnd(candidate, maxSize: 0x1000) - candidate
        guard size > (best?.size ?? -1) else { continue }
        guard isCredLabelExecveCandidate(funcOff: candidate) else { continue }
        best = (candidate, size)
    }
    return best?.off
}
// MARK: - Epilogue / Deny / Success Finders
/// Find the canonical epilogue: last `ldp x29, x30, [sp, ...]` in function.
/// Find the canonical epilogue: the last `ldp x29, x30, [sp, ...]` in the
/// function, located by scanning backward from the function end.
private func findCredLabelEpilogue(funcOff: Int) -> Int? {
    let funcEnd = findFuncEnd(funcOff, maxSize: 0x1000)
    var cursor = funcEnd - 4
    while cursor >= funcOff {
        defer { cursor -= 4 }
        guard let insn = disasAt(cursor) else { continue }
        let operands = insn.operandString.replacingOccurrences(of: " ", with: "")
        if insn.mnemonic == "ldp", operands.hasPrefix("x29,x30,[sp") {
            return cursor
        }
    }
    return nil
}
/// Find shared deny return: MOV W0,#1 at epilogueOff - 4.
/// Find the shared deny return: a `mov w0, #1` immediately before the epilogue.
///
/// - Parameters:
///   - funcOff: File offset of the function start (lower bound).
///   - epilogueOff: File offset of the epilogue instruction.
/// - Returns: `epilogueOff - 4` when that slot holds `mov w0, #1` and still
///   lies inside the function, otherwise `nil`.
///
/// Note: the previous backward scan could only ever match at
/// `epilogueOff - 4` because the match condition required
/// `off + 4 == epilogueOff`; every other iteration was dead code. This
/// checks the single reachable slot directly, with identical results.
private func findCredLabelDenyReturn(funcOff: Int, epilogueOff: Int) -> Int? {
    let denyOff = epilogueOff - 4
    guard denyOff >= funcOff else { return nil }
    guard buffer.readU32(at: denyOff) == Self.movW0_1_u32 else { return nil }
    return denyOff
}
/// Find success exits: `b epilogue` preceded (within 0x10 bytes) by `mov w0, #0`.
/// Collect success exits: unconditional `b epilogue` instructions preceded
/// (within the previous 0x10 bytes) by `mov w0, #0`.
private func findCredLabelSuccessExits(funcOff: Int, epilogueOff: Int) -> [Int] {
    let funcEnd = findFuncEnd(funcOff, maxSize: 0x1000)
    var result: [Int] = []
    for branchOff in stride(from: funcOff, to: funcEnd, by: 4) {
        // Only branches that land exactly on the epilogue qualify.
        guard jbDecodeBBranch(at: branchOff) == epilogueOff else { continue }
        // Look back up to four instructions for the zeroing of w0.
        let lowerBound = max(funcOff, branchOff - 0x10)
        let precededByMovZero = stride(from: branchOff - 4, through: lowerBound, by: -4)
            .contains { buffer.readU32(at: $0) == Self.movW0_0_u32 }
        if precededByMovZero {
            result.append(branchOff)
        }
    }
    return result
}
/// Recover ldr x26, [x29, #imm] instruction bytes from the function body.
/// Recover the raw bytes of the first `ldr x26, [x29, #imm]` in the function
/// body, paired with its disassembled operand string.
private func findCredLabelCSFlagsReload(funcOff: Int) -> (Data, String)? {
    let funcEnd = findFuncEnd(funcOff, maxSize: 0x1000)
    var cursor = funcOff
    while cursor < funcEnd {
        defer { cursor += 4 }
        guard let insn = disasAt(cursor), insn.mnemonic == "ldr" else { continue }
        let operands = insn.operandString.replacingOccurrences(of: " ", with: "")
        guard operands.hasPrefix("x26,[x29") else { continue }
        // Hand back the raw 4-byte word plus the human-readable operands.
        let rawWord = Data(buffer.data[cursor ..< cursor + 4])
        return (rawWord, insn.operandString)
    }
    return nil
}
// MARK: - Instruction Encoders
/// CBZ X26, #0x10 skip 4 instructions if x26 == 0
/// Encode `cbz x26, #0x10` — skip the next four instructions when x26 == 0.
private func encodeCBZ_X26_skip16() -> Data {
    // CBZ (64-bit): 0xB4000000 | (imm19 << 5) | Rt, where imm19 is the
    // branch offset in 4-byte words. Offset 0x10 bytes -> imm19 = 4; Rt = x26.
    let wordOffset: UInt32 = 16 / 4
    let rt: UInt32 = 26
    return ARM64.encodeU32(0xB400_0000 | (wordOffset << 5) | rt)
}
/// LDR W8, [X26]
/// Encode `ldr w8, [x26]` (32-bit load, zero immediate offset).
private func encodeLDR_W8_X26() -> Data {
    // LDR (immediate, unsigned offset, W): base 0xB9400000 | (Rn << 5) | Rt.
    // Rn = x26, Rt = w8, imm12 = 0 -> 0xB9400348.
    ARM64.encodeU32(0xB940_0000 | (26 << 5) | 8)
}
/// STR W8, [X26]
/// Encode `str w8, [x26]` (32-bit store, zero immediate offset).
private func encodeSTR_W8_X26() -> Data {
    // STR (immediate, unsigned offset, W): base 0xB9000000 | (Rn << 5) | Rt.
    // Rn = x26, Rt = w8, imm12 = 0 -> 0xB9000348.
    ARM64.encodeU32(0xB900_0000 | (26 << 5) | 8)
}
/// AND W8, W8, #imm (32-bit logical immediate).
/// For mask 0xFFFFC0FF: encodes as NOT(0x3F00) = elements with inverted bits
/// Encode `and w8, w8, #0xFFFFC0FF` (clears bits 8..13 of the CS flags word).
///
/// NOTE(review): the mask parameter is ignored — the instruction word is
/// pre-assembled for the fixed mask 0xFFFFC0FF. Callers must pass exactly
/// that mask (or accept that the argument is not honored).
private func encodeAND_W8_W8_mask(_: UInt32) -> Data {
    // AND (immediate, 32-bit). The word 0x12126508 decodes to the bitfields
    // sf=0, N=0, immr=18, imms=25, Rn=w8, Rd=w8: a run of 26 ones rotated
    // right by 18, i.e. ones at bits 14..31 and 0..7 -> mask 0xFFFFC0FF.
    // Pre-assembled equivalent of: and w8, w8, #0xFFFFC0FF
    let insn: UInt32 = 0x1212_6508
    return ARM64.encodeU32(insn)
}
/// ORR W8, W8, #0xC (CS_GET_TASK_ALLOW | CS_INSTALLER)
/// Encode `orr w8, w8, #0xC` (sets CS_GET_TASK_ALLOW | CS_INSTALLER).
///
/// NOTE(review): the immediate parameter is ignored — the instruction word is
/// pre-assembled for the fixed immediate 0xC.
private func encodeORR_W8_W8_imm(_: UInt32) -> Data {
    // ORR (immediate, 32-bit). The word 0x321E0508 decodes to N=0, immr=30,
    // imms=1, Rn=w8, Rd=w8: a run of 2 ones rotated into bits 2..3 -> 0xC.
    // Pre-assembled equivalent of: orr w8, w8, #0xC
    let insn: UInt32 = 0x321E_0508
    return ARM64.encodeU32(insn)
}
}

View File

@@ -0,0 +1,118 @@
// KernelJBPatchDounmount.swift JB: NOP the upstream cleanup call in dounmount.
//
// Python source: scripts/patchers/kernel_jb_patch_dounmount.py
//
// Reveal: string-anchor "dounmount:" find the unique near-tail 4-arg zeroed cleanup
// call: mov x0,xN ; mov w1,#0 ; mov w2,#0 ; mov w3,#0 ; bl ; mov x0,xN ; bl ; cbz x19,...
// Patch: NOP the first BL in that sequence.
import Capstone
import Foundation
extension KernelJBPatcher {
/// NOP the upstream cleanup call in _dounmount.
/// NOP out the upstream cleanup call inside `_dounmount`.
/// - Returns: `true` when the call site was located and the patch emitted.
@discardableResult
func patchDounmount() -> Bool {
    log("\n[JB] _dounmount: upstream cleanup-call NOP")
    // Anchor the function via its "dounmount:" log string.
    guard let funcStart = findFuncByString("dounmount:") else {
        log(" [-] 'dounmount:' anchor not found")
        return false
    }
    let bodyEnd = findFuncEnd(funcStart, maxSize: 0x4000)
    // Locate the unique zeroed 4-arg cleanup call near the function tail.
    guard let cleanupBL = findUpstreamCleanupCall(funcStart, end: bodyEnd) else {
        log(" [-] upstream dounmount cleanup call not found")
        return false
    }
    emit(cleanupBL, ARM64.nop,
         patchID: "jb.dounmount.nop_cleanup_bl",
         virtualAddress: fileOffsetToVA(cleanupBL),
         description: "NOP [_dounmount upstream cleanup call]")
    return true
}
// MARK: - Private helpers
/// Find a function that contains a reference to `string` (null-terminated).
/// Find the start of a function containing an xref to `string`
/// (null-terminated); the first xref wins.
private func findFuncByString(_ string: String) -> Int? {
    guard let strOff = buffer.findString(string),
          let ref = findStringRefs(strOff).first
    else { return nil }
    return findFunctionStart(ref.adrpOff)
}
/// Scan for the 8-instruction upstream cleanup call pattern and return
/// the file offset of the first BL, or nil if not uniquely found.
/// Scan for the 8-instruction upstream cleanup call pattern and return
/// the file offset of the first BL, or nil if not uniquely found.
///
/// Pattern matched (8 consecutive instructions):
///   mov x0, <src> ; mov w1, #0 ; mov w2, #0 ; mov w3, #0 ;
///   bl <cleanup>  ; mov x0, <same src> ; bl <next> ; cbz x<N>, ...
/// Exactly one hit is required in [start, end); multiple hits mean the
/// pattern is ambiguous and nothing is returned.
private func findUpstreamCleanupCall(_ start: Int, end: Int) -> Int? {
    var hits: [Int] = []
    // Last window start must leave room for 8 instructions; the exclusive
    // stride bound makes the final candidate off = end - 0x20.
    let limit = end - 0x1C
    guard start < limit else { return nil }
    for off in stride(from: start, to: limit, by: 4) {
        let insns = disasm.disassemble(in: buffer.data, at: off, count: 8)
        guard insns.count >= 8 else { continue }
        let i0 = insns[0], i1 = insns[1], i2 = insns[2], i3 = insns[3]
        let i4 = insns[4], i5 = insns[5], i6 = insns[6], i7 = insns[7]
        // mov x0, <xreg> ; mov w1,#0 ; mov w2,#0 ; mov w3,#0 ; bl ; mov x0,<same> ; bl ; cbz x..
        guard i0.mnemonic == "mov", i1.mnemonic == "mov",
              i2.mnemonic == "mov", i3.mnemonic == "mov" else { continue }
        guard i4.mnemonic == "bl", i5.mnemonic == "mov",
              i6.mnemonic == "bl", i7.mnemonic == "cbz" else { continue }
        // i0: mov x0, <src_reg>
        guard let srcReg = movRegRegDst(i0, dst: "x0") else { continue }
        // i1: mov w1, #0
        guard movImmZero(i1, dst: "w1") else { continue }
        // i2: mov w2, #0
        guard movImmZero(i2, dst: "w2") else { continue }
        // i3: mov w3, #0
        guard movImmZero(i3, dst: "w3") else { continue }
        // i5: mov x0, <same src_reg>  (same object re-passed to the next call)
        guard let src5 = movRegRegDst(i5, dst: "x0"), src5 == srcReg else { continue }
        // i7: cbz x<reg>, ...
        guard cbzUsesXreg(i7) else { continue }
        // NOTE(review): assumes the disassembler reports `address` equal to
        // the file offset it was asked to decode at — confirm disasm setup.
        hits.append(Int(i4.address))
    }
    if hits.count == 1 { return hits[0] }
    return nil
}
/// Return the source register name if instruction is `mov <dst>, <src_reg>`.
/// If `insn` is `mov <dst>, <src_reg>` with the expected destination register,
/// return the source register name; otherwise nil.
private func movRegRegDst(_ insn: Instruction, dst: String) -> String? {
    guard insn.mnemonic == "mov",
          let detail = insn.aarch64,
          detail.operands.count == 2
    else { return nil }
    let operands = detail.operands
    guard operands[0].type == AARCH64_OP_REG,
          operands[1].type == AARCH64_OP_REG,
          regName(operands[0].reg) == dst
    else { return nil }
    return regName(operands[1].reg)
}
/// Return true if instruction is `mov <dst>, #0`.
/// True when `insn` is `mov <dst>, #0` for the expected destination register.
private func movImmZero(_ insn: Instruction, dst: String) -> Bool {
    guard insn.mnemonic == "mov",
          let detail = insn.aarch64,
          detail.operands.count == 2
    else { return false }
    let target = detail.operands[0]
    let source = detail.operands[1]
    return target.type == AARCH64_OP_REG
        && regName(target.reg) == dst
        && source.type == AARCH64_OP_IMM
        && source.imm == 0
}
/// Return true if instruction is `cbz x<N>, <label>` (64-bit register).
/// True when `insn` is `cbz x<N>, <label>` (64-bit register form).
private func cbzUsesXreg(_ insn: Instruction) -> Bool {
    guard insn.mnemonic == "cbz",
          let detail = insn.aarch64,
          detail.operands.count >= 2,
          let firstOp = detail.operands.first,
          firstOp.type == AARCH64_OP_REG
    else { return false }
    return regName(firstOp.reg).hasPrefix("x")
}
/// Get the register name string for an aarch64_reg value.
/// Human-readable name for an aarch64 register value; "??" when unknown.
private func regName(_ reg: aarch64_reg) -> String {
    guard let name = disasm.registerName(reg.rawValue) else { return "??" }
    return name
}
}

View File

@@ -0,0 +1,449 @@
// KernelJBPatchHookCredLabel.swift JB kernel patch: Faithful upstream C23 hook
//
// Python source: scripts/patchers/kernel_jb_patch_hook_cred_label.py
//
// Strategy (faithful upstream C23): Redirect mac_policy_ops[18]
// (_hook_cred_label_update_execve sandbox wrapper) to a code cave that:
// 1. Saves all argument registers + frame.
// 2. Calls vfs_context_current() to get the vfs context.
// 3. Calls vnode_getattr(vp, vap, ctx) to get owner/mode attributes.
// 4. If VSUID or VSGID bits set: copies owner uid/gid into the pending
// credential and sets P_SUGID.
// 5. Restores all registers and branches back to the original sandbox wrapper.
//
// The ops[18] entry is an auth-rebase chained pointer. We re-encode it
// preserving PAC metadata but changing the target to our cave address.
import Capstone
import Foundation
extension KernelJBPatcher {
// MARK: - Constants
// Index of cred_label_update_execve within mac_policy_ops (ops[18]).
private static let hookCredLabelIndex = 18
// Total instruction count of the C23 cave; the Swift and Python patchers
// must reserve identical cave sizes.
private static let c23CaveWords = 46 // Must match Python _C23_CAVE_WORDS
// Expected shape of vfs_context_current prologue (5 words).
// Python: _VFS_CONTEXT_CURRENT_SHAPE
// NOTE(review): the #0x3e0 load offset is kernel-build specific — confirm
// against the target build if the shape scan starts failing.
private static let vfsContextCurrentShape: [UInt32] = [
    ARM64.pacibspU32, // pacibsp
    ARM64.stpFP_LR_pre, // stp x29, x30, [sp, #-0x10]!
    ARM64.movFP_SP, // mov x29, sp
    ARM64.mrs_x0_tpidr_el1, // mrs x0, tpidr_el1
    ARM64.ldr_x1_x0_0x3e0, // ldr x1, [x0, #0x3e0]
]
// MARK: - Entry Point
/// Faithful upstream C23: redirect ops[18] to a vnode-getattr trampoline.
@discardableResult
/// Faithful upstream C23: redirect ops[18] to a vnode-getattr trampoline.
///
/// Pipeline: resolve the sandbox wrapper behind ops[18], resolve
/// vfs_context_current and vnode_getattr, allocate and build the
/// 46-instruction cave, then emit both the retargeted auth-rebase ops entry
/// and the cave bytes. Each failure is logged and aborts the whole patch.
/// - Returns: `true` when every stage resolved and both patches were emitted.
@discardableResult
func patchHookCredLabelUpdateExecve() -> Bool {
    log("\n[JB] _hook_cred_label_update_execve: faithful upstream C23")
    // 1. Find sandbox ops[18] entry and current wrapper target.
    guard let (opsTable, entryOff, entryRaw, wrapperOff) = findHookCredLabelWrapper() else {
        return false
    }
    // 2. Find vfs_context_current by prologue shape scan.
    let vfsCtxOff = findVfsContextCurrentByShape()
    guard vfsCtxOff >= 0 else {
        log(" [-] vfs_context_current not resolved")
        return false
    }
    // 3. Find vnode_getattr by BL scan near its log string.
    let vnodeGetattrOff = findVnodeGetattrViaString()
    guard vnodeGetattrOff >= 0 else {
        log(" [-] vnode_getattr not resolved")
        return false
    }
    // 4. Allocate code cave for 46 instructions (184 bytes).
    let caveSize = Self.c23CaveWords * 4
    guard let caveOff = findCodeCave(size: caveSize) else {
        log(" [-] no executable code cave found for faithful C23 (\(caveSize) bytes)")
        return false
    }
    // 5. Build the C23 shellcode (fails if any BL/B displacement is out of range).
    guard let caveBytes = buildC23Cave(
        caveOff: caveOff,
        vfsContextCurrentOff: vfsCtxOff,
        vnodeGetattrOff: vnodeGetattrOff,
        wrapperOff: wrapperOff
    ) else {
        log(" [-] failed to encode faithful C23 branch/call relocations")
        return false
    }
    // 6. Retarget ops[18] to cave, preserving the entry's PAC metadata.
    guard let newEntry = encodeAuthRebaseLike(origVal: entryRaw, targetFoff: caveOff) else {
        log(" [-] failed to encode hook ops entry retarget")
        return false
    }
    emit(entryOff, newEntry,
         patchID: "jb.hook_cred_label.ops_retarget",
         description: "retarget ops[\(Self.hookCredLabelIndex)] to faithful C23 cave [_hook_cred_label_update_execve]")
    emit(caveOff, caveBytes,
         patchID: "jb.hook_cred_label.c23_cave",
         description: "faithful upstream C23 cave (vnode getattr -> uid/gid/P_SUGID fixup -> wrapper)")
    // opsTable is part of the locator tuple but not needed further here;
    // discard it explicitly to keep the tuple shape self-documenting.
    _ = opsTable
    return true
}
// MARK: - Sandbox Ops Table Finder
/// Find the sandbox mac_policy_ops table via the mac_policy_conf struct.
/// Mirrors Python `_find_sandbox_ops_table_via_conf()`: locates the conf
/// struct by its "Sandbox" + "Seatbelt sandbox policy" string pair, then
/// reads the mpc_ops pointer at conf+32.
/// Find the sandbox mac_policy_ops table via the mac_policy_conf struct.
/// Mirrors Python `_find_sandbox_ops_table_via_conf()`: locates the conf
/// struct by its "Sandbox" + "Seatbelt sandbox policy" string pair, then
/// reads the mpc_ops pointer at conf+32.
/// - Returns: file offset of the ops table, or `nil` (logged) when the conf
///   struct cannot be located.
private func findSandboxOpsTable() -> Int? {
    guard let seatbeltOff = buffer.findString("Seatbelt sandbox policy") else {
        log(" [-] Sandbox/Seatbelt strings not found")
        return nil
    }
    // Find "\0Sandbox\0" and return offset of 'S'
    guard let sandboxPattern = "\u{0}Sandbox\u{0}".data(using: .utf8),
          let sandboxRange = buffer.data.range(of: sandboxPattern)
    else {
        log(" [-] Sandbox string not found")
        return nil
    }
    let sandboxOff = sandboxRange.lowerBound + 1 // skip leading NUL
    // Collect __DATA_CONST and __DATA segment ranges.
    var dataRanges: [(Int, Int)] = []
    for seg in segments {
        if seg.name == "__DATA_CONST" || seg.name == "__DATA", seg.fileSize > 0 {
            let s = Int(seg.fileOffset)
            dataRanges.append((s, s + Int(seg.fileSize)))
        }
    }
    // Scan pointer-aligned slots for conf.mpc_name -> "Sandbox" followed by
    // conf.mpc_fullname -> "Seatbelt sandbox policy" at +8, then read
    // conf.mpc_ops at +32.
    for (dStart, dEnd) in dataRanges {
        var i = dStart
        while i <= dEnd - 40 {
            defer { i += 8 }
            let val = buffer.readU64(at: i)
            // Skip nulls and auth-rebase fixups (bit 63 set): the name slot
            // is expected to be a plain rebase.
            // NOTE(review): the 0x7FF_FFFF_FFFF mask is presumed to be the
            // chained-fixup target field for this format — confirm against
            // the binary's chained-fixup header.
            if val == 0 || (val & (1 << 63)) != 0 { continue }
            guard (val & 0x7FF_FFFF_FFFF) == UInt64(sandboxOff) else { continue }
            let val2 = buffer.readU64(at: i + 8)
            if (val2 & (1 << 63)) != 0 { continue }
            guard (val2 & 0x7FF_FFFF_FFFF) == UInt64(seatbeltOff) else { continue }
            let valOps = buffer.readU64(at: i + 32)
            // mpc_ops must also be a plain (non-auth) rebase pointer.
            if (valOps & (1 << 63)) == 0 {
                let opsOff = Int(valOps & 0x7FF_FFFF_FFFF)
                log(" [+] mac_policy_conf at foff 0x\(String(format: "%X", i)), mpc_ops -> 0x\(String(format: "%X", opsOff))")
                return opsOff
            }
        }
    }
    log(" [-] mac_policy_conf not found")
    return nil
}
/// Find the sandbox ops[18] wrapper, returning (opsTable, entryOff, entryRaw, wrapperOff).
/// Find the sandbox ops[18] wrapper.
///
/// - Returns: `(opsTableOff, entryOff, entryRaw, wrapperOff)` where
///   `entryRaw` is the original auth-rebase-encoded ops entry and
///   `wrapperOff` its decoded file-offset target; `nil` (logged) on any
///   validation failure.
private func findHookCredLabelWrapper() -> (Int, Int, UInt64, Int)? {
    guard let opsTable = findSandboxOpsTable() else {
        log(" [-] sandbox ops table not found")
        return nil
    }
    let entryOff = opsTable + Self.hookCredLabelIndex * 8
    guard entryOff + 8 <= buffer.count else {
        log(" [-] hook ops entry outside file")
        return nil
    }
    // Read via the shared helper instead of a raw `load(fromByteOffset:)`:
    // `load` requires an aligned offset and traps on misalignment, and
    // readU64 is what findSandboxOpsTable() already uses for these slots.
    let entryRaw = buffer.readU64(at: entryOff)
    guard entryRaw != 0 else {
        log(" [-] hook ops entry is null")
        return nil
    }
    // The entry must be an auth-rebase chained pointer (bit 63 set).
    guard (entryRaw & (1 << 63)) != 0 else {
        log(" [-] hook ops entry is not auth-rebase encoded: 0x\(String(format: "%016X", entryRaw))")
        return nil
    }
    let wrapperOff = decodeChainedPtr(entryRaw)
    guard wrapperOff >= 0 else {
        log(" [-] decoded wrapper target invalid: 0x\(String(format: "%X", Int(entryRaw & 0x3FFF_FFFF)))")
        return nil
    }
    // The decoded target must land inside an executable region.
    let inCode = codeRanges.contains { wrapperOff >= $0.start && wrapperOff < $0.end }
    guard inCode else {
        log(" [-] wrapper target not in code range: 0x\(String(format: "%X", wrapperOff))")
        return nil
    }
    log(" [+] hook cred-label wrapper ops[\(Self.hookCredLabelIndex)] entry 0x\(String(format: "%X", entryOff)) -> 0x\(String(format: "%X", wrapperOff))")
    return (opsTable, entryOff, entryRaw, wrapperOff)
}
// MARK: - vfs_context_current Finder
/// Locate vfs_context_current by its unique 5-word prologue pattern.
/// Locate vfs_context_current by matching its unique 5-word prologue shape.
/// The result (or -1 on failure/ambiguity) is memoized in `jbScanCache`.
private func findVfsContextCurrentByShape() -> Int {
    let cacheKey = "c23_vfs_context_current"
    if let cached = jbScanCache[cacheKey] { return cached }
    guard let (textStart, textEnd) = kernTextRange else {
        jbScanCache[cacheKey] = -1
        return -1
    }
    let shape = Self.vfsContextCurrentShape
    let spanBytes = shape.count * 4
    // Collect every word-aligned position whose next 5 words equal the shape.
    var matches: [Int] = []
    var cursor = textStart
    while cursor + spanBytes <= textEnd {
        let isMatch = shape.indices.allSatisfy {
            buffer.readU32(at: cursor + $0 * 4) == shape[$0]
        }
        if isMatch { matches.append(cursor) }
        cursor += 4
    }
    // Only a unique hit is trustworthy; anything else is ambiguous.
    let resolved = matches.count == 1 ? matches[0] : -1
    if resolved >= 0 {
        log(" [+] vfs_context_current body at 0x\(String(format: "%X", resolved)) (shape match)")
    } else {
        log(" [-] vfs_context_current shape scan ambiguous (\(matches.count) hits)")
    }
    jbScanCache[cacheKey] = resolved
    return resolved
}
// MARK: - vnode_getattr Finder
/// Resolve vnode_getattr from a BL near its log string "vnode_getattr".
/// Resolve vnode_getattr from a BL near its log string "vnode_getattr".
///
/// Walks up to six occurrences of the string; for each first xref, scans the
/// 80 bytes preceding the referencing ADRP for a BL whose target lands in an
/// executable range, and returns that target.
/// - Returns: file offset of vnode_getattr, or -1 when unresolved.
private func findVnodeGetattrViaString() -> Int {
    guard let strOff = buffer.findString("vnode_getattr") else { return -1 }
    // Scan for references to this and nearby instances
    var searchStart = strOff
    for _ in 0 ..< 6 {
        let refs = findStringRefs(searchStart)
        if let ref = refs.first {
            let refOff = ref.adrpOff
            // Scan back 80 bytes from the ref for a BL
            var scanOff = max(0, refOff - 80)
            while scanOff < refOff {
                let insn = buffer.readU32(at: scanOff)
                if insn >> 26 == 0b100101 { // BL
                    // Sign-extend the 26-bit word offset: shift it up to
                    // bit 31, then arithmetic-shift back down.
                    let imm26 = insn & 0x03FF_FFFF
                    let signedImm = Int32(bitPattern: imm26 << 6) >> 6
                    let target = scanOff + Int(signedImm) * 4
                    // Accept only BL targets inside an executable range.
                    let inCode = codeRanges.contains { target >= $0.start && target < $0.end }
                    if inCode {
                        log(" [+] vnode_getattr at 0x\(String(format: "%X", target)) (via BL at 0x\(String(format: "%X", scanOff)))")
                        return target
                    }
                }
                scanOff += 4
            }
        }
        // Try next occurrence
        guard let nextOff = buffer.findString("vnode_getattr", from: searchStart + 1) else { break }
        searchStart = nextOff
    }
    return -1
}
// MARK: - C23 Cave Builder
/// Build the faithful upstream C23 shellcode (exactly 46 instructions = 184 bytes).
///
/// Layout (instruction offsets from caveOff):
/// 0: nop
/// 1: cbz x3, #0xa8 (skip if arg3=vp is null)
/// 2: sub sp, sp, #0x400
/// 3: stp x29, x30, [sp]
/// 4: stp x0, x1, [sp, #0x10]
/// 5: stp x2, x3, [sp, #0x20]
/// 6: stp x4, x5, [sp, #0x30]
/// 7: stp x6, x7, [sp, #0x40]
/// 8: nop
/// 9: bl vfs_context_current (position-dependent)
/// 10: mov x2, x0
/// 11: ldr x0, [sp, #0x28] (vp = saved x3)
/// 12: add x1, sp, #0x80 (vap = &stack_vap)
/// 13: mov w8, #0x380 (va_supported bitmask)
/// 14: stp xzr, x8, [x1] (vap->va_active = 0, vap->va_supported = 0x380)
/// 15: stp xzr, xzr, [x1, #0x10]
/// 16: nop
/// 17: bl vnode_getattr (position-dependent)
/// 18: cbnz x0, #0x4c (skip fixup on error)
/// 19: mov w2, #0
/// 20: ldr w8, [sp, #0xcc] (vap + offset for va_mode bits)
/// 21: tbz w8, #0xb, #0x14
/// 22: ldr w8, [sp, #0xc4]
/// 23: ldr x0, [sp, #0x18] (new ucred*)
/// 24: str w8, [x0, #0x18] (ucred->cr_uid)
/// 25: mov w2, #1
/// 26: ldr w8, [sp, #0xcc]
/// 27: tbz w8, #0xa, #0x14
/// 28: mov w2, #1
/// 29: ldr w8, [sp, #0xc8]
/// 30: ldr x0, [sp, #0x18]
/// 31: str w8, [x0, #0x28] (ucred->cr_gid)
/// 32: cbz w2, #0x14 (if nothing changed, skip P_SUGID)
/// 33: ldr x0, [sp, #0x20] (proc*)
/// 34: ldr w8, [x0, #0x454]
/// 35: orr w8, w8, #0x100 (set P_SUGID)
/// 36: str w8, [x0, #0x454]
/// 37: ldp x0, x1, [sp, #0x10]
/// 38: ldp x2, x3, [sp, #0x20]
/// 39: ldp x4, x5, [sp, #0x30]
/// 40: ldp x6, x7, [sp, #0x40]
/// 41: ldp x29, x30, [sp]
/// 42: add sp, sp, #0x400
/// 43: nop
/// 44: b wrapperOff (position-dependent)
/// 45: nop
/// - Parameters:
///   - caveOff: file offset where the cave will be written; anchors all
///     position-dependent BL/B encodings below.
///   - vfsContextCurrentOff: resolved file offset of vfs_context_current.
///   - vnodeGetattrOff: resolved file offset of vnode_getattr.
///   - wrapperOff: original ops[18] sandbox wrapper to branch back to.
/// - Returns: the 184-byte cave, or `nil` when any BL/B displacement is out
///   of range or the instruction count drifts from `c23CaveWords`.
///
/// NOTE(review): the struct offsets baked into the c23_* constants (ucred
/// +0x18/+0x28, proc +0x454, vap frame at sp+0x80) are kernel-build
/// specific — verify against the target build.
private func buildC23Cave(
    caveOff: Int,
    vfsContextCurrentOff: Int,
    vnodeGetattrOff: Int,
    wrapperOff: Int
) -> Data? {
    var code: [Data] = []
    // 0: nop
    code.append(ARM64.nop)
    // 1: cbz x3, #0xa8 (skip entire body = 42 instructions forward)
    code.append(encodeU32(ARM64.c23_cbzX3_0xA8))
    // 2: sub sp, sp, #0x400
    code.append(encodeU32(ARM64.c23_subSP_0x400))
    // 3: stp x29, x30, [sp]
    code.append(encodeU32(ARM64.c23_stpFP_LR))
    // 4: stp x0, x1, [sp, #0x10]
    code.append(encodeU32(ARM64.c23_stpX0X1_0x10))
    // 5: stp x2, x3, [sp, #0x20]
    code.append(encodeU32(ARM64.c23_stpX2X3_0x20))
    // 6: stp x4, x5, [sp, #0x30]
    code.append(encodeU32(ARM64.c23_stpX4X5_0x30))
    // 7: stp x6, x7, [sp, #0x40]
    code.append(encodeU32(ARM64.c23_stpX6X7_0x40))
    // 8: nop
    code.append(ARM64.nop)
    // 9: bl vfs_context_current (position-dependent: encoded from its slot)
    let blVfsOff = caveOff + code.count * 4
    guard let blVfs = encodeBL(from: blVfsOff, to: vfsContextCurrentOff) else { return nil }
    code.append(blVfs)
    // 10: mov x2, x0
    code.append(encodeU32(ARM64.c23_movX2_X0))
    // 11: ldr x0, [sp, #0x28] (saved x3 = vp)
    code.append(encodeU32(ARM64.c23_ldrX0_sp_0x28))
    // 12: add x1, sp, #0x80
    code.append(encodeU32(ARM64.c23_addX1_sp_0x80))
    // 13: mov w8, #0x380
    code.append(encodeU32(ARM64.c23_movzW8_0x380))
    // 14: stp xzr, x8, [x1]
    code.append(encodeU32(ARM64.c23_stpXZR_X8))
    // 15: stp xzr, xzr, [x1, #0x10]
    code.append(encodeU32(ARM64.c23_stpXZR_XZR_0x10))
    // 16: nop
    code.append(ARM64.nop)
    // 17: bl vnode_getattr (position-dependent: encoded from its slot)
    let blGetAttrOff = caveOff + code.count * 4
    guard let blGetAttr = encodeBL(from: blGetAttrOff, to: vnodeGetattrOff) else { return nil }
    code.append(blGetAttr)
    // 18: cbnz x0, #0x4c (skip 19 instructions)
    code.append(encodeU32(ARM64.c23_cbnzX0_0x4c))
    // 19: mov w2, #0
    code.append(encodeU32(ARM64.c23_movW2_0))
    // 20: ldr w8, [sp, #0xcc]
    code.append(encodeU32(ARM64.c23_ldrW8_sp_0xcc))
    // 21: tbz w8, #0xb, #0x14 (skip 5 instrs)
    code.append(encodeU32(ARM64.c23_tbzW8_11_0x14))
    // 22: ldr w8, [sp, #0xc4]
    code.append(encodeU32(ARM64.c23_ldrW8_sp_0xc4))
    // 23: ldr x0, [sp, #0x18]
    code.append(encodeU32(ARM64.c23_ldrX0_sp_0x18))
    // 24: str w8, [x0, #0x18]
    code.append(encodeU32(ARM64.c23_strW8_x0_0x18))
    // 25: mov w2, #1
    code.append(encodeU32(ARM64.c23_movW2_1))
    // 26: ldr w8, [sp, #0xcc]
    code.append(encodeU32(ARM64.c23_ldrW8_sp_0xcc))
    // 27: tbz w8, #0xa, #0x14 (skip 5 instrs)
    code.append(encodeU32(ARM64.c23_tbzW8_10_0x14))
    // 28: mov w2, #1
    code.append(encodeU32(ARM64.c23_movW2_1))
    // 29: ldr w8, [sp, #0xc8]
    code.append(encodeU32(ARM64.c23_ldrW8_sp_0xc8))
    // 30: ldr x0, [sp, #0x18]
    code.append(encodeU32(ARM64.c23_ldrX0_sp_0x18))
    // 31: str w8, [x0, #0x28]
    code.append(encodeU32(ARM64.c23_strW8_x0_0x28))
    // 32: cbz w2, #0x14 (skip 5 instrs)
    code.append(encodeU32(ARM64.c23_cbzW2_0x14))
    // 33: ldr x0, [sp, #0x20]
    code.append(encodeU32(ARM64.c23_ldrX0_sp_0x20))
    // 34: ldr w8, [x0, #0x454]
    code.append(encodeU32(ARM64.c23_ldrW8_x0_0x454))
    // 35: orr w8, w8, #0x100
    code.append(encodeU32(ARM64.c23_orrW8_0x100))
    // 36: str w8, [x0, #0x454]
    code.append(encodeU32(ARM64.c23_strW8_x0_0x454))
    // 37: ldp x0, x1, [sp, #0x10]
    code.append(encodeU32(ARM64.c23_ldpX0X1_0x10))
    // 38: ldp x2, x3, [sp, #0x20]
    code.append(encodeU32(ARM64.c23_ldpX2X3_0x20))
    // 39: ldp x4, x5, [sp, #0x30]
    code.append(encodeU32(ARM64.c23_ldpX4X5_0x30))
    // 40: ldp x6, x7, [sp, #0x40]
    code.append(encodeU32(ARM64.c23_ldpX6X7_0x40))
    // 41: ldp x29, x30, [sp]
    code.append(encodeU32(ARM64.c23_ldpFP_LR))
    // 42: add sp, sp, #0x400
    code.append(encodeU32(ARM64.c23_addSP_0x400))
    // 43: nop
    code.append(ARM64.nop)
    // 44: b wrapperOff (position-dependent: encoded from its slot)
    let branchBackOff = caveOff + code.count * 4
    guard let branchBack = encodeB(from: branchBackOff, to: wrapperOff) else { return nil }
    code.append(branchBack)
    // 45: nop
    code.append(ARM64.nop)
    // The fixed CBZ/CBNZ/TBZ skip distances above assume this exact layout;
    // any drift in instruction count invalidates them.
    guard code.count == Self.c23CaveWords else {
        log(" [-] C23 cave length drifted: \(code.count) insns, expected \(Self.c23CaveWords)")
        return nil
    }
    return code.reduce(Data(), +)
}
// MARK: - Auth-Rebase Pointer Encoder
/// Retarget an auth-rebase chained pointer while preserving PAC metadata.
/// Retarget an auth-rebase chained pointer, preserving everything above the
/// low 32 bits (PAC diversity, key, next-offset metadata).
private func encodeAuthRebaseLike(origVal: UInt64, targetFoff: Int) -> Data? {
    // Only auth-rebase entries (bit 63 set) may be rewritten this way.
    guard origVal >> 63 == 1 else { return nil }
    let highBits = origVal & ~UInt64(0xFFFF_FFFF)
    let lowBits = UInt64(targetFoff) & 0xFFFF_FFFF
    var encoded = (highBits | lowBits).littleEndian
    return withUnsafeBytes(of: &encoded) { Data($0) }
}
// MARK: - Encoding Helper
/// Convenience forwarder to the shared ARM64 word encoder.
private func encodeU32(_ value: UInt32) -> Data {
    return ARM64.encodeU32(value)
}
}

View File

@@ -0,0 +1,146 @@
// KernelJBPatchIoucMacf.swift JB kernel patch: IOUC MACF gate bypass
//
// Python source: scripts/patchers/kernel_jb_patch_iouc_macf.py
//
// Strategy:
// 1. Locate the "IOUC %s failed MACF in process %s" format string.
// 2. For each ADRP+ADD xref, find the enclosing function.
// 3. Within a window before the format-string ADRP, search for the pattern:
// BL <mac_aggregator> ; calls MACF dispatch
// CBZ W0, <allow> ; skips deny path if MACF allowed
// where <mac_aggregator> has the characteristic shape:
// LDR X10, [X10, #0x9e8] ; slot load from mac_policy_list
// BLRAA/BLRAB/BLR X10 ; indirect call
// 4. Confirm that an ADRP referencing the fail-log string appears in
// the deny block (between CBZ and the end of the function).
// 5. Replace CBZ W0, <allow> with unconditional B <allow>.
import Foundation
extension KernelJBPatcher {
/// IOUC MACF gate bypass: replace CBZ W0, <allow> with B <allow>.
/// IOUC MACF gate bypass: replace CBZ W0, <allow> with B <allow>.
///
/// Anchors on the "IOUC %s failed MACF in process %s" format string; for
/// each xref, searches the window before the referencing ADRP for a
/// `bl <macf aggregator>` / `cbz w0, <allow>` pair, verifies the fail-log
/// ADRP sits in the deny block, then rewrites the CBZ into an unconditional
/// branch. At most one site is patched.
/// - Returns: `true` when the gate was found and patched.
@discardableResult
func patchIoucFailedMacf() -> Bool {
    log("\n[JB] IOUC MACF gate: branch-level deny bypass")
    guard let failStrOff = buffer.findString("IOUC %s failed MACF in process %s") else {
        log(" [-] IOUC failed-MACF format string not found")
        return false
    }
    let refs = findStringRefs(failStrOff)
    guard !refs.isEmpty else {
        log(" [-] no xrefs for IOUC failed-MACF format string")
        return false
    }
    // Bail out early when there is no code range at all; the range itself is
    // only consumed indirectly by findFunctionStart / findFuncEnd.
    guard let codeRange = codeRanges.first else { return false }
    let _ = codeRange // used implicitly via findFunctionStart / findFuncEnd
    for (adrpOff, _) in refs {
        guard let funcStart = findFunctionStart(adrpOff) else { continue }
        let funcEnd = findFuncEnd(funcStart, maxSize: 0x2000)
        // Search for BL + CBZ W0 pair in the window before the ADRP.
        let searchStart = max(funcStart, adrpOff - 0x120)
        let searchEnd = min(funcEnd, adrpOff + 4)
        var off = searchStart
        while off < searchEnd - 4 {
            defer { off += 4 }
            // Require BL at [off].
            guard let blTarget = jbDecodeBL(at: off) else { continue }
            // Require CBZ W0, <target> at [off + 4].
            let cbzInsn = buffer.readU32(at: off + 4)
            guard isCbzW0(cbzInsn) else { continue }
            // Check that the BL target looks like a MACF aggregator.
            guard hasMacfAggregatorShape(at: blTarget) else { continue }
            // Decode the CBZ allow-target.
            guard let allowTarget = decodeCBZTarget(insn: cbzInsn, at: off + 4) else { continue }
            // Allow target must be forward and within the function.
            guard allowTarget > off, allowTarget < funcEnd else { continue }
            // Verify that the fail-log ADRP is in the deny block (after CBZ,
            // within 0x80 bytes) so we know this CBZ guards the deny path.
            let failAdrpExpected = adrpOff
            guard failAdrpExpected > off + 4, failAdrpExpected < min(funcEnd, off + 0x80) else { continue }
            // Encode unconditional B to allowTarget.
            guard let patchBytes = ARM64Encoder.encodeB(from: off + 4, to: allowTarget) else { continue }
            log(" [+] IOUC MACF gate fn=0x\(String(format: "%X", funcStart)), bl=0x\(String(format: "%X", off)), cbz=0x\(String(format: "%X", off + 4)), allow=0x\(String(format: "%X", allowTarget))")
            let delta = allowTarget - (off + 4)
            let va = fileOffsetToVA(off + 4)
            emit(off + 4, patchBytes,
                 patchID: "iouc_macf_gate",
                 virtualAddress: va,
                 description: "b #0x\(String(format: "%X", delta)) [IOUC MACF deny → allow]")
            return true
        }
    }
    log(" [-] narrow IOUC MACF deny branch not found")
    return false
}
// MARK: - Private helpers
/// Return true if `insn` is CBZ W0, <any>.
/// True when `insn` encodes `cbz w0, <label>`.
private func isCbzW0(_ insn: UInt32) -> Bool {
    // 32-bit CBZ has opcode byte 0x34 (CBNZ is 0x35; 64-bit CBZ is 0xB4);
    // the low 5 bits carry Rt, which must be register 0 (w0).
    let opcodeByte = insn >> 24
    let rt = insn & 0x1F
    return opcodeByte == 0x34 && rt == 0
}
/// Decode a CBZ/CBNZ target from an instruction at `pc`.
/// Resolve the branch target of a CBZ/CBNZ at `pc`: the imm19 field
/// (bits 23:5) is sign-extended and scaled by the 4-byte instruction width.
private func decodeCBZTarget(insn: UInt32, at pc: Int) -> Int? {
    let imm19 = (insn >> 5) & 0x7FFFF
    // Shift the 19-bit field up to bit 31, then arithmetic-shift back down
    // to sign-extend it.
    let wordDelta = Int32(bitPattern: imm19 << 13) >> 13
    return pc + Int(wordDelta) * 4
}
/// Heuristic: does the function at `calleeOff` look like a MACF aggregator?
///
/// The aggregator loads from the mac_policy_list slot at offset 0x9E8 and
/// makes an indirect call through that pointer. We look for:
/// LDR X10, [X10, #0x9e8] (slot load)
/// BLRAA/BLRAB/BLR X10 (indirect dispatch)
/// Heuristic shape check for the MACF aggregator: the callee body must both
/// load the mac_policy_list slot (`ldr x10, [x10, #0x9e8]`) and dispatch
/// indirectly through x10 (blraa/blrab/blr).
private func hasMacfAggregatorShape(at calleeOff: Int) -> Bool {
    guard (0 ..< buffer.count).contains(calleeOff) else { return false }
    let funcEnd = findFuncEnd(calleeOff, maxSize: 0x400)
    // Pre-assembled words (see the field derivations in the Python patcher):
    let slotLoad: UInt32 = 0xF944_F54A // ldr x10, [x10, #0x9e8]
    let indirectCalls: Set<UInt32> = [
        0xD73F_0940, // blraa x10
        0xD73F_0D40, // blrab x10
        0xD63F_0140, // blr x10
    ]
    var foundLoad = false
    var foundCall = false
    for off in stride(from: calleeOff, to: funcEnd, by: 4) {
        let word = buffer.readU32(at: off)
        foundLoad = foundLoad || word == slotLoad
        foundCall = foundCall || indirectCalls.contains(word)
        if foundLoad, foundCall { return true }
    }
    return false
}
}

View File

@@ -0,0 +1,324 @@
// KernelJBPatchKcall10.swift JB kernel patch: kcall10 ABI-correct sysent[439] cave
//
// Python source: scripts/patchers/kernel_jb_patch_kcall10.py
//
// Strategy: Replace SYS_kas_info (sysent[439]) with a cave implementing
// the kcall10 primitive:
// uap[0] = target function pointer
// uap[1..7] = arg0..arg6
// Returns 64-bit X0 via retval and _SYSCALL_RET_UINT64_T.
//
// The cave is a standard ARM64e function body with PACIBSP/RETAB.
// The sysent entries use arm64e chained auth-rebase fixup pointers.
import Foundation
extension KernelJBPatcher {
// MARK: - Constants
// sysent table geometry.
private static let sysent_max_entries = 558
// Size of one struct sysent row: sy_call ptr + sy_arg_munge32 ptr +
// return_type/narg/arg_bytes metadata.
private static let sysent_entry_size = 24
// PAC diversity constant applied to the new sy_call auth-rebase pointer.
// NOTE(review): assumed constant for sysent sy_call slots — confirm against
// the target kernel's chained-fixup metadata.
private static let sysent_pac_diversity: UInt32 = 0xBCAD
// kcall10 semantics: 8 user args = target function pointer + 7 call args.
private static let kcall10_narg: UInt16 = 8
private static let kcall10_arg_bytes: UInt16 = 32 // 8 * 4
private static let kcall10_return_type: UInt32 = 7 // _SYSCALL_RET_UINT64_T
// EINVAL, returned by the cave on a bad target pointer.
private static let kcall10_einval: UInt32 = 22
// MARK: - Entry Point
/// ABI-correct kcall10 patch: install a sysent[439] cave.
/// ABI-correct kcall10 patch: install a sysent[439] cave.
///
/// Replaces SYS_kas_info (sysent[439]) with a cave implementing the kcall10
/// primitive: uap[0] is the target function pointer, uap[1..7] the
/// arguments, and the 64-bit x0 result is delivered through retval
/// (_SYSCALL_RET_UINT64_T). The sysent pointers are re-encoded as arm64e
/// chained auth-rebase fixups.
/// - Returns: `true` when every lookup succeeded and all patches were emitted.
@discardableResult
func patchKcall10() -> Bool {
    log("\n[JB] kcall10: ABI-correct sysent[439] cave")
    // 1. Find _nosys (symbol first, heuristic fallback).
    guard let nosysOff = resolveSymbol("_nosys") ?? findNosys() else {
        log(" [-] _nosys not found")
        return false
    }
    // 2. Find sysent table base.
    guard let sysEntOff = findSysentTable(nosysOff: nosysOff) else {
        log(" [-] sysent table not found")
        return false
    }
    let entry439 = sysEntOff + 439 * Self.sysent_entry_size
    // 3. Find a reusable 8-arg munge32 helper already referenced by sysent.
    let (mungerTarget, _, matchCount) = findMunge32ForNarg(
        sysEntOff: sysEntOff,
        narg: Self.kcall10_narg,
        argBytes: Self.kcall10_arg_bytes
    )
    guard mungerTarget >= 0 else {
        log(" [-] no unique reusable 8-arg munge32 helper found")
        return false
    }
    // 4. Build cave and allocate.
    let caveBytes = buildKcall10Cave()
    guard let caveOff = findCodeCave(size: caveBytes.count) else {
        log(" [-] no executable code cave found for kcall10")
        return false
    }
    // 5. Read original sysent[439] chain metadata.
    guard entry439 + Self.sysent_entry_size <= buffer.count else {
        log(" [-] sysent[439] outside file")
        return false
    }
    // Read through the shared helper rather than a raw `load(fromByteOffset:)`,
    // which requires 8-byte alignment and traps on a misaligned entry offset;
    // readU64 is what the other chained-pointer scans already use.
    let oldSyCallRaw = buffer.readU64(at: entry439)
    let callNext = extractChainNext(oldSyCallRaw)
    let oldMungeRaw = buffer.readU64(at: entry439 + 8)
    let mungeNext = extractChainNext(oldMungeRaw)
    let mungeDiv = extractChainDiversity(oldMungeRaw)
    let mungeAddrDiv = extractChainAddrDiv(oldMungeRaw)
    let mungeKey = extractChainKey(oldMungeRaw)
    log(" [+] sysent table at file offset 0x\(String(format: "%X", sysEntOff))")
    log(" [+] sysent[439] entry at 0x\(String(format: "%X", entry439))")
    log(" [+] reusing unique 8-arg munge32 target 0x\(String(format: "%X", mungerTarget)) (\(matchCount) matching sysent rows)")
    log(" [+] cave at 0x\(String(format: "%X", caveOff)) (0x\(String(format: "%X", caveBytes.count)) bytes)")
    // 6. Emit patches: cave body, retargeted sy_call / sy_arg_munge32
    // pointers (original chain metadata preserved for the munger), and the
    // sysent metadata triple.
    emit(caveOff, caveBytes,
         patchID: "jb.kcall10.cave",
         description: "kcall10 ABI-correct cave (target + 7 args -> uint64 x0)")
    emit(entry439,
         encodeChainedAuthPtr(targetFoff: caveOff, nextVal: callNext,
                              diversity: Self.sysent_pac_diversity, key: 0, addrDiv: 0),
         patchID: "jb.kcall10.sy_call",
         description: "sysent[439].sy_call = cave 0x\(String(format: "%X", caveOff)) (auth rebase, div=0xBCAD, next=\(callNext)) [kcall10]")
    emit(entry439 + 8,
         encodeChainedAuthPtr(targetFoff: mungerTarget, nextVal: mungeNext,
                              diversity: mungeDiv, key: mungeKey, addrDiv: mungeAddrDiv),
         patchID: "jb.kcall10.sy_munge",
         description: "sysent[439].sy_arg_munge32 = 8-arg helper 0x\(String(format: "%X", mungerTarget)) [kcall10]")
    // sy_return_type (u32) + sy_narg (u16) + sy_arg_bytes (u16), little-endian.
    var metadata = Data(count: 8)
    metadata.withUnsafeMutableBytes { ptr in
        ptr.storeBytes(of: Self.kcall10_return_type.littleEndian, toByteOffset: 0, as: UInt32.self)
        ptr.storeBytes(of: Self.kcall10_narg.littleEndian, toByteOffset: 4, as: UInt16.self)
        ptr.storeBytes(of: Self.kcall10_arg_bytes.littleEndian, toByteOffset: 6, as: UInt16.self)
    }
    emit(entry439 + 16, metadata,
         patchID: "jb.kcall10.sysent_meta",
         description: "sysent[439].sy_return_type=7,sy_narg=8,sy_arg_bytes=0x20 [kcall10]")
    return true
}
// MARK: - Sysent Table Finder
/// Find the real sysent table base by locating a _nosys entry then scanning backward.
private func findSysentTable(nosysOff: Int) -> Int? {
var nosysEntry = -1
var segStart = -1
// Scan DATA segments for an entry whose decoded pointer == nosysOff
for seg in segments {
guard seg.name.contains("DATA") else { continue }
let sStart = Int(seg.fileOffset)
let sEnd = sStart + Int(seg.fileSize)
var off = sStart
while off + Self.sysent_entry_size <= sEnd {
let val = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt64.self) }
let decoded = decodeChainedPtr(val)
if decoded == nosysOff {
// Confirm: next entry also decodes to a code-range address
let val2 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + Self.sysent_entry_size, as: UInt64.self) }
let dec2 = decodeChainedPtr(val2)
let inCode = dec2 > 0 && codeRanges.contains { dec2 >= $0.start && dec2 < $0.end }
if inCode {
nosysEntry = off
segStart = sStart
break
}
}
off += 8
}
if nosysEntry >= 0 { break }
}
guard nosysEntry >= 0 else { return nil }
log(" [*] _nosys entry found at foff 0x\(String(format: "%X", nosysEntry)), scanning backward for table start")
// Scan backward in sysent_entry_size steps to find table base
var base = nosysEntry
var entriesBack = 0
while base - Self.sysent_entry_size >= segStart {
guard entriesBack < Self.sysent_max_entries else { break }
let prev = base - Self.sysent_entry_size
let val = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: prev, as: UInt64.self) }
let decoded = decodeChainedPtr(val)
guard decoded > 0 else { break }
let inCode = codeRanges.contains { decoded >= $0.start && decoded < $0.end }
guard inCode else { break }
// Check narg and arg_bytes for sanity
let narg: UInt16 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: prev + 20, as: UInt16.self) }
let argBytes: UInt16 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: prev + 22, as: UInt16.self) }
guard narg <= 12, argBytes <= 96 else { break }
base = prev
entriesBack += 1
}
log(" [+] sysent table base at foff 0x\(String(format: "%X", base)) (\(entriesBack) entries before first _nosys)")
return base
}
// MARK: - Munger Finder
/// Find a reusable 8-arg munge32 helper with matching metadata.
/// Returns (targetFoff, exemplarEntry, matchCount) or (-1, -1, 0).
private func findMunge32ForNarg(
sysEntOff: Int,
narg: UInt16,
argBytes: UInt16
) -> (Int, Int, Int) {
var candidates: [Int: [Int]] = [:]
for idx in 0 ..< Self.sysent_max_entries {
let entry = sysEntOff + idx * Self.sysent_entry_size
guard entry + Self.sysent_entry_size <= buffer.count else { break }
let curNarg: UInt16 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: entry + 20, as: UInt16.self) }
let curArgBytes: UInt16 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: entry + 22, as: UInt16.self) }
guard curNarg == narg, curArgBytes == argBytes else { continue }
let rawMunge = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: entry + 8, as: UInt64.self) }
let target = decodeChainedPtr(rawMunge)
guard target > 0 else { continue }
candidates[target, default: []].append(entry)
}
guard !candidates.isEmpty else { return (-1, -1, 0) }
guard candidates.count == 1 else {
log(" [-] multiple distinct 8-arg munge32 helpers found: " +
candidates.keys.sorted().map { "0x\(String(format: "%X", $0))" }.joined(separator: ", "))
return (-1, -1, 0)
}
let (target, entries) = candidates.first!
return (target, entries[0], entries.count)
}
// MARK: - kcall10 Cave Builder
/// Build the ABI-correct kcall10 function body.
///
/// Contract:
/// x0 = proc*
/// x1 = &uthread->uu_arg[0] (uap pointer)
/// x2 = &uthread->uu_rval[0] (retval pointer)
///
/// uap layout (8 qwords):
/// [0] target function pointer
/// [1..7] arg0..arg6
///
/// Returns EINVAL (22) on null uap/retval/target, else stores X0 into retval
/// and returns 0 with _SYSCALL_RET_UINT64_T.
private func buildKcall10Cave() -> Data {
var code: [Data] = []
// pacibsp
code.append(ARM64.pacibsp)
// sub sp, sp, #0x30
code.append(encodeU32k(0xD100_C3FF)) // sub sp, sp, #0x30
// stp x21, x22, [sp]
code.append(encodeU32k(0xA900_5BF5)) // stp x21, x22, [sp]
// stp x19, x20, [sp, #0x10]
code.append(encodeU32k(0xA901_53F3)) // stp x19, x20, [sp, #0x10]
// stp x29, x30, [sp, #0x20]
code.append(encodeU32k(0xA902_7BFD)) // stp x29, x30, [sp, #0x20]
// add x29, sp, #0x20
code.append(encodeU32k(0x9100_83FD)) // add x29, sp, #0x20
// mov w19, #22 (EINVAL = 22 = 0x16)
code.append(encodeU32k(0x5280_02D3)) // movz w19, #0x16
// mov x20, x1 (save uap)
code.append(encodeU32k(0xAA01_03F4)) // mov x20, x1
// mov x21, x2 (save retval)
code.append(encodeU32k(0xAA02_03F5)) // mov x21, x2
// cbz x20, #0x30 (null uap skip to exit, 12 instrs forward)
code.append(encodeU32k(0xB400_0194)) // cbz x20, #+0x30
// cbz x21, #0x2c (null retval skip to exit)
code.append(encodeU32k(0xB400_0175)) // cbz x21, #+0x2c
// ldr x16, [x20] (target = uap[0])
code.append(encodeU32k(0xF940_0290)) // ldr x16, [x20]
// cbz x16, #0x24 (null target skip, 9 instrs)
code.append(encodeU32k(0xB400_0130)) // cbz x16, #+0x24
// ldp x0, x1, [x20, #0x8]
code.append(encodeU32k(0xA940_8680)) // ldp x0, x1, [x20, #0x8]
// ldp x2, x3, [x20, #0x18]
code.append(encodeU32k(0xA941_8E82)) // ldp x2, x3, [x20, #0x18]
// ldp x4, x5, [x20, #0x28]
code.append(encodeU32k(0xA942_9684)) // ldp x4, x5, [x20, #0x28]
// ldr x6, [x20, #0x38]
code.append(encodeU32k(0xF940_1E86)) // ldr x6, [x20, #0x38]
// mov x7, xzr
code.append(encodeU32k(0xAA1F_03E7)) // mov x7, xzr
// blr x16
code.append(encodeU32k(0xD63F_0200)) // blr x16
// str x0, [x21] (store result in retval)
code.append(encodeU32k(0xF900_02A0)) // str x0, [x21]
// mov w19, #0
code.append(encodeU32k(0x5280_0013)) // movz w19, #0
// mov w0, w19 (return value)
code.append(encodeU32k(0x2A13_03E0)) // mov w0, w19
// ldp x21, x22, [sp]
code.append(encodeU32k(0xA940_5BF5)) // ldp x21, x22, [sp]
// ldp x19, x20, [sp, #0x10]
code.append(encodeU32k(0xA941_53F3)) // ldp x19, x20, [sp, #0x10]
// ldp x29, x30, [sp, #0x20]
code.append(encodeU32k(0xA942_7BFD)) // ldp x29, x30, [sp, #0x20]
// add sp, sp, #0x30
code.append(encodeU32k(0x9100_C3FF)) // add sp, sp, #0x30
// retab
code.append(ARM64.retab)
return code.reduce(Data(), +)
}
// MARK: - Chain Pointer Helpers
/// Encode an arm64e kernel cache auth-rebase chained fixup pointer.
private func encodeChainedAuthPtr(
targetFoff: Int,
nextVal: UInt32,
diversity: UInt32,
key: UInt32,
addrDiv: UInt32
) -> Data {
let val: UInt64 =
(UInt64(targetFoff) & 0x3FFF_FFFF) |
(UInt64(diversity & 0xFFFF) << 32) |
(UInt64(addrDiv & 1) << 48) |
(UInt64(key & 3) << 49) |
(UInt64(nextVal & 0xFFF) << 51) |
(1 << 63)
return withUnsafeBytes(of: val.littleEndian) { Data($0) }
}
private func extractChainNext(_ raw: UInt64) -> UInt32 {
UInt32((raw >> 51) & 0xFFF)
}
private func extractChainDiversity(_ raw: UInt64) -> UInt32 {
UInt32((raw >> 32) & 0xFFFF)
}
private func extractChainAddrDiv(_ raw: UInt64) -> UInt32 {
UInt32((raw >> 48) & 1)
}
private func extractChainKey(_ raw: UInt64) -> UInt32 {
UInt32((raw >> 49) & 3)
}
// MARK: - Encoding Helper
private func encodeU32k(_ value: UInt32) -> Data {
ARM64.encodeU32(value)
}
}

View File

@@ -0,0 +1,81 @@
// KernelJBPatchLoadDylinker.swift JB: bypass load_dylinker policy gate in the dyld path.
//
// Python source: scripts/patchers/kernel_jb_patch_load_dylinker.py
//
// Reveal: string-anchor "/usr/lib/dyld" -> kernel-text function containing the ref
// inside that function: BL <check>; CBZ W0, <allow>; MOV W0, #2 (deny path).
// Patch: replace BL with unconditional B to <allow>, skipping the policy check.
import Foundation
extension KernelJBPatcher {
    /// Bypass the load_dylinker policy gate in the dyld path.
    ///
    /// Anchors on the "/usr/lib/dyld" string, recovers the containing
    /// function of each kernel-text reference, and rewrites the gate's BL
    /// into an unconditional B to the allow target so the policy check is
    /// never consulted.
    @discardableResult
    func patchLoadDylinker() -> Bool {
        log("\n[JB] _load_dylinker: skip dyld policy check")
        guard let strOff = buffer.findString("/usr/lib/dyld") else {
            log(" [-] '/usr/lib/dyld' string not found")
            return false
        }
        let refs = findStringRefs(strOff)
        if refs.isEmpty {
            log(" [-] no kernel-text code refs to '/usr/lib/dyld'")
            return false
        }
        for (refOff, _) in refs where jbIsInCodeRange(refOff) {
            guard let funcStart = findFunctionStart(refOff) else { continue }
            let funcEnd = findFuncEnd(funcStart, maxSize: 0x1200)
            guard let gate = findBlCbzGate(funcStart: funcStart, funcEnd: funcEnd),
                  let branchBytes = ARM64Encoder.encodeB(from: gate.blOff, to: gate.allowTarget)
            else { continue }
            log(" [+] dyld anchor func at 0x\(String(format: "%X", funcStart)), patch BL at 0x\(String(format: "%X", gate.blOff))")
            emit(gate.blOff, branchBytes,
                 patchID: "jb.load_dylinker.policy_bypass",
                 virtualAddress: fileOffsetToVA(gate.blOff),
                 description: "b #0x\(String(format: "%X", gate.allowTarget - gate.blOff)) [_load_dylinker policy bypass]")
            return true
        }
        log(" [-] dyld policy gate not found in dyld-anchored function")
        return false
    }
    // MARK: - Private helpers
    /// Scan [funcStart, funcEnd) for `BL <check> ; CBZ W0, <allow> ; MOV W0, #2`.
    /// Returns (blOff, allowTarget) on success.
    private func findBlCbzGate(funcStart: Int, funcEnd: Int) -> (blOff: Int, allowTarget: Int)? {
        for off in stride(from: funcStart, through: funcEnd - 12, by: 4) {
            guard let call = disasm.disassemble(in: buffer.data, at: off, count: 1).first,
                  call.mnemonic == "bl" else { continue }
            guard let branch = disasm.disassemble(in: buffer.data, at: off + 4, count: 1).first,
                  branch.mnemonic == "cbz",
                  branch.operandString.hasPrefix("w0, "),
                  let branchDetail = branch.aarch64,
                  branchDetail.operands.count >= 2 else { continue }
            let allowTarget = Int(branchDetail.operands.last!.imm)
            // Selector: deny path sets w0 = 2 immediately after CBZ.
            guard let deny = disasm.disassemble(in: buffer.data, at: off + 8, count: 1).first,
                  deny.mnemonic == "mov",
                  deny.operandString.hasPrefix("w0, #2") else { continue }
            return (off, allowTarget)
        }
        return nil
    }
}

View File

@@ -0,0 +1,187 @@
// KernelJBPatchMacMount.swift JB kernel patch: MAC mount bypass
//
// Python source: scripts/patchers/kernel_jb_patch_mac_mount.py
import Capstone
import Foundation
extension KernelJBPatcher {
    /// Apply the upstream twin bypasses in the mount-role wrapper.
    ///
    /// Patches two sites in the wrapper that decides whether execution can
    /// continue into `mount_common()`:
    /// - `tbnz wFlags, #5, deny` -> NOP
    /// - `ldrb w8, [xTmp, #1]` -> `mov x8, xzr`
    ///
    /// Runtime design:
    /// 1. Recover `mount_common` from the `"mount_common()"` string.
    /// 2. Scan a bounded neighborhood for local callers.
    /// 3. Select the unique caller containing both upstream gates.
    /// - Returns: `true` only when exactly one candidate wrapper was found
    ///   and both patches were emitted.
    @discardableResult
    func patchMacMount() -> Bool {
        log("\n[JB] ___mac_mount: upstream twin bypass")
        guard let strOff = buffer.findString("mount_common()") else {
            log(" [-] mount_common anchor function not found")
            return false
        }
        let refs = findStringRefs(strOff)
        guard !refs.isEmpty, let mountCommon = findFunctionStart(refs[0].adrpOff) else {
            log(" [-] mount_common anchor function not found")
            return false
        }
        // Scan +/-0x5000 of mount_common for callers in code ranges
        // NOTE(review): bounds come from the FIRST code range only — assumes
        // the primary kernel text range is first; confirm for split caches.
        let searchStart = max(codeRanges.first?.start ?? 0, mountCommon - 0x5000)
        let searchEnd = min(codeRanges.first?.end ?? buffer.count, mountCommon + 0x5000)
        var candidates: [Int: (Int, Int)] = [:] // caller -> (flagGate, stateGate)
        var off = searchStart
        while off < searchEnd {
            guard let blTarget = decodeBLat(off), blTarget == mountCommon else { off += 4; continue }
            // Each caller is evaluated once; mount_common calling itself is excluded.
            guard let caller = findFunctionStart(off), caller != mountCommon,
                  candidates[caller] == nil
            else { off += 4; continue }
            let callerEnd = findFuncEnd(caller, maxSize: 0x1200)
            if let sites = matchUpstreamMountWrapper(start: caller, end: callerEnd, mountCommon: mountCommon) {
                candidates[caller] = sites
            }
            off += 4
        }
        // Require a unique wrapper; ambiguity means the reveal is unreliable.
        guard candidates.count == 1 else {
            log(" [-] expected 1 upstream mac_mount candidate, found \(candidates.count)")
            return false
        }
        let (branchOff, movOff) = candidates.values.first!
        let va1 = fileOffsetToVA(branchOff)
        let va2 = fileOffsetToVA(movOff)
        emit(branchOff, ARM64.nop,
             patchID: "kernelcache_jb.mac_mount.flag_gate",
             virtualAddress: va1,
             description: "NOP [___mac_mount upstream flag gate]")
        emit(movOff, ARM64.movX8Xzr,
             patchID: "kernelcache_jb.mac_mount.state_clear",
             virtualAddress: va2,
             description: "mov x8,xzr [___mac_mount upstream state clear]")
        return true
    }
    // MARK: - Private helpers
    /// Match the wrapper body: requires at least one BL to mount_common plus a
    /// unique flag gate and a unique state gate. Returns (flagGateOff, stateGateOff).
    private func matchUpstreamMountWrapper(start: Int, end: Int, mountCommon: Int) -> (Int, Int)? {
        // Collect all BL-to-mount_common call sites
        var callSites: [Int] = []
        for off in stride(from: start, to: end, by: 4) {
            if decodeBLat(off) == mountCommon { callSites.append(off) }
        }
        guard !callSites.isEmpty else { return nil }
        guard let flagGate = findFlagGate(start: start, end: end) else { return nil }
        guard let stateGate = findStateGate(start: start, end: end, callSites: callSites) else { return nil }
        return (flagGate, stateGate)
    }
    /// Find a unique `tbnz wN, #5, <deny>` where deny-block starts with `mov w?, #1`.
    /// Returns nil unless exactly one site matches within [start, end).
    private func findFlagGate(start: Int, end: Int) -> Int? {
        var hits: [Int] = []
        var off = start
        while off + 4 < end {
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 1)
            guard let insn = insns.first else { off += 4; continue }
            guard insn.mnemonic == "tbnz",
                  let ops = insn.aarch64?.operands, ops.count == 3,
                  ops[0].type == AARCH64_OP_REG,
                  ops[1].type == AARCH64_OP_IMM, ops[1].imm == 5,
                  ops[2].type == AARCH64_OP_IMM
            else { off += 4; continue }
            // Check register is a w-register (operand detail has no width flag here)
            let regName = insn.operandString.components(separatedBy: ",").first?
                .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
            guard regName.hasPrefix("w") else { off += 4; continue }
            // Deny target must stay inside the wrapper body.
            let target = Int(ops[2].imm)
            guard target >= start, target < end else { off += 4; continue }
            // Target must start with `mov w?, #1`
            let targetInsns = disasm.disassemble(in: buffer.data, at: target, count: 1)
            guard let tInsn = targetInsns.first,
                  tInsn.mnemonic == "mov",
                  let tOps = tInsn.aarch64?.operands, tOps.count == 2,
                  tOps[0].type == AARCH64_OP_REG,
                  tOps[1].type == AARCH64_OP_IMM, tOps[1].imm == 1
            else { off += 4; continue }
            let tRegName = tInsn.operandString.components(separatedBy: ",").first?
                .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
            guard tRegName.hasPrefix("w") else { off += 4; continue }
            hits.append(off)
            off += 4
        }
        return hits.count == 1 ? hits[0] : nil
    }
    /// Find `add x?, x?, #0x70 / ldrb w8, [x?, #1] / tbz w8, #6, <near call>`
    /// Returns the file offset of the ldrb (the instruction later replaced by
    /// `mov x8, xzr`), or nil unless exactly one site matches.
    private func findStateGate(start: Int, end: Int, callSites: [Int]) -> Int? {
        var hits: [Int] = []
        var off = start
        while off + 8 < end {
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 3)
            guard insns.count >= 3 else { off += 4; continue }
            let addInsn = insns[0], ldrInsn = insns[1], tbzInsn = insns[2]
            // add xD, xS, #0x70
            guard addInsn.mnemonic == "add",
                  let addOps = addInsn.aarch64?.operands, addOps.count == 3,
                  addOps[0].type == AARCH64_OP_REG,
                  addOps[1].type == AARCH64_OP_REG,
                  addOps[2].type == AARCH64_OP_IMM, addOps[2].imm == 0x70
            else { off += 4; continue }
            let addDst = addOps[0].reg
            let addDstName = addInsn.operandString.components(separatedBy: ",").first?
                .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
            guard addDstName.hasPrefix("x") else { off += 4; continue }
            // ldrb w8, [xDst, #1] — base register must be the add's destination
            guard ldrInsn.mnemonic == "ldrb",
                  let ldrOps = ldrInsn.aarch64?.operands, ldrOps.count >= 2,
                  ldrOps[0].type == AARCH64_OP_REG,
                  ldrOps[1].type == AARCH64_OP_MEM,
                  ldrOps[1].mem.base == addDst,
                  ldrOps[1].mem.disp == 1
            else { off += 4; continue }
            let ldrDstReg = ldrOps[0].reg
            let ldrDstName = ldrInsn.operandString.components(separatedBy: ",").first?
                .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
            guard ldrDstName.hasPrefix("w") else { off += 4; continue }
            // tbz wLdr, #6, <target near a call>
            guard tbzInsn.mnemonic == "tbz",
                  let tbzOps = tbzInsn.aarch64?.operands, tbzOps.count == 3,
                  tbzOps[0].type == AARCH64_OP_REG, tbzOps[0].reg == ldrDstReg,
                  tbzOps[1].type == AARCH64_OP_IMM, tbzOps[1].imm == 6,
                  tbzOps[2].type == AARCH64_OP_IMM
            else { off += 4; continue }
            // The tbz target must land within 0x80 bytes before a mount_common call.
            let tbzTarget = Int(tbzOps[2].imm)
            guard callSites.contains(where: { tbzTarget <= $0 && $0 <= tbzTarget + 0x80 }) else {
                off += 4; continue
            }
            // Record the ldrb site — this is what gets rewritten to mov x8,xzr.
            hits.append(Int(ldrInsn.address))
            off += 4
        }
        return hits.count == 1 ? hits[0] : nil
    }
    /// Decode a BL instruction at `off`, returning the target file offset or nil.
    /// BL opcode is 0b100101 in bits [31:26]; imm26 is a signed word offset,
    /// sign-extended via the `<< 6 >> 6` arithmetic-shift pair on Int32.
    private func decodeBLat(_ off: Int) -> Int? {
        guard off + 4 <= buffer.count else { return nil }
        let insn = buffer.readU32(at: off)
        guard insn >> 26 == 0b100101 else { return nil }
        let imm26 = insn & 0x03FF_FFFF
        let signedImm = Int32(bitPattern: imm26 << 6) >> 6
        return off + Int(signedImm) * 4
    }
}

View File

@@ -0,0 +1,65 @@
// KernelJBPatchNvram.swift JB kernel patch: NVRAM permission bypass
//
// Python source: scripts/patchers/kernel_jb_patch_nvram.py
import Foundation
extension KernelJBPatcher {
    /// NOP the verifyPermission gate in the `krn.` key-prefix path.
    ///
    /// Runtime reveal is string-anchored only: enumerate code refs to
    /// `"krn."`, recover each ref's containing function once, then pick the
    /// unique `tbz`/`tbnz` guard sitting immediately before the key-prefix
    /// load sequence and overwrite it with a NOP.
    @discardableResult
    func patchNvramVerifyPermission() -> Bool {
        log("\n[JB] verifyPermission (NVRAM): NOP")
        guard let strOff = buffer.findString("krn.") else {
            log(" [-] 'krn.' string not found")
            return false
        }
        let refs = findStringRefs(strOff)
        guard !refs.isEmpty else {
            log(" [-] no code refs to 'krn.'")
            return false
        }
        var guardSites: [Int] = []
        var visitedFuncs = Set<Int>()
        for (refAdrp, _) in refs {
            guard let funcOff = findFunctionStart(refAdrp),
                  visitedFuncs.insert(funcOff).inserted else { continue }
            // Walk back up to 8 instructions from the ADRP ref for tbz/tbnz,
            // never crossing the function start.
            let floor = max(funcOff, refAdrp - 0x20)
            for off in stride(from: refAdrp - 4, through: floor, by: -4) {
                guard let insn = disasm.disassemble(in: buffer.data, at: off, count: 1).first else { continue }
                if insn.mnemonic == "tbz" || insn.mnemonic == "tbnz" {
                    guardSites.append(off)
                    break
                }
            }
        }
        // Deduplicate; the reveal is only trusted when exactly one site remains.
        let unique = Set(guardSites).sorted()
        guard unique.count == 1, let patchOff = unique.first else {
            log(" [-] expected 1 NVRAM verifyPermission gate, found \(unique.count)")
            return false
        }
        emit(patchOff, ARM64.nop,
             patchID: "kernelcache_jb.nvram_verify_permission",
             virtualAddress: fileOffsetToVA(patchOff),
             description: "NOP [verifyPermission NVRAM]")
        return true
    }
}

View File

@@ -0,0 +1,64 @@
// KernelJBPatchPortToMap.swift JB: skip kernel-map panic in _convert_port_to_map_with_flavor.
//
// Python source: scripts/patchers/kernel_jb_patch_port_to_map.py
//
// Reveal: string-anchor "userspace has control access to a kernel map"
// walk backward from ADRP to find CMP + B.cond (conditional branch forward past panic)
// replace B.cond with unconditional B to same target.
import Foundation
extension KernelJBPatcher {
    /// Skip kernel-map panic in _convert_port_to_map_with_flavor.
    ///
    /// Anchors on the panic string, then walks backward from each code ref
    /// to the `CMP ; B.cond` pair that guards the panic fall-through and
    /// rewrites the conditional branch into an unconditional B to the same
    /// forward (non-panic) target.
    @discardableResult
    func patchConvertPortToMap() -> Bool {
        log("\n[JB] _convert_port_to_map_with_flavor: skip panic")
        guard let strOff = buffer.findString("userspace has control access to a kernel map") else {
            log(" [-] panic string not found")
            return false
        }
        let refs = findStringRefs(strOff)
        if refs.isEmpty {
            log(" [-] no code refs")
            return false
        }
        for (adrpOff, _) in refs {
            // Walk backward (up to 0x60 bytes) from the ADRP for CMP + B.cond.
            let floor = max(adrpOff - 0x60, 0)
            for cmpOff in stride(from: adrpOff - 4, through: floor, by: -4) {
                guard cmpOff + 8 <= buffer.count else { continue }
                let pair = disasm.disassemble(in: buffer.data, at: cmpOff, count: 2)
                guard pair.count >= 2,
                      pair[0].mnemonic == "cmp",
                      pair[1].mnemonic.hasPrefix("b.") else { continue }
                // The guarding branch must jump forward, past the ADRP (panic path).
                let branchOff = cmpOff + 4
                guard let (branchTarget, _) = jbDecodeBranchTarget(at: branchOff),
                      branchTarget > adrpOff,
                      let bBytes = ARM64Encoder.encodeB(from: branchOff, to: branchTarget)
                else { continue }
                // Replace B.cond with an unconditional B to the same target.
                emit(branchOff, bBytes,
                     patchID: "jb.port_to_map.skip_panic",
                     virtualAddress: fileOffsetToVA(branchOff),
                     description: "b 0x\(String(format: "%X", branchTarget)) [_convert_port_to_map skip panic]")
                return true
            }
        }
        log(" [-] branch site not found")
        return false
    }
}

View File

@@ -0,0 +1,93 @@
// KernelJBPatchPostValidation.swift JB: additional post-validation cmp w0,w0 bypass.
//
// Python source: scripts/patchers/kernel_jb_patch_post_validation.py
//
// Reveal: string-anchor "AMFI: code signature validation failed" caller function
// BL targets in AMFI text callee with `cmp w0,#imm ; b.ne` preceded by a BL.
// Patch: replace `cmp w0,#imm` with `cmp w0,w0` so the compare always sets Z=1.
import Capstone
import Foundation
extension KernelJBPatcher {
    /// Patch: rewrite the SHA256-only reject compare in AMFI's post-validation path.
    ///
    /// Reveal: collect callers referencing "AMFI: code signature validation
    /// failed"; within each caller's BL callees, find `cmp w0, #imm ; b.ne`
    /// preceded by a BL within three instructions. The single matching
    /// compare is rewritten to `cmp w0, w0`, which always sets Z so the
    /// `b.ne` reject branch never fires.
    /// - Returns: `true` only when exactly one compare site was identified.
    @discardableResult
    func patchPostValidationAdditional() -> Bool {
        log("\n[JB] postValidation additional: cmp w0,w0")
        guard let strOff = buffer.findString("AMFI: code signature validation failed") else {
            log(" [-] string not found")
            return false
        }
        let refs = findStringRefs(strOff)
        guard !refs.isEmpty else {
            log(" [-] no code refs")
            return false
        }
        // Collect unique caller function starts.
        var seenFuncs = Set<Int>()
        var hits: [Int] = []
        for (adrpOff, _) in refs {
            guard let callerStart = findFunctionStart(adrpOff),
                  !seenFuncs.contains(callerStart) else { continue }
            seenFuncs.insert(callerStart)
            let callerEnd = findFuncEnd(callerStart, maxSize: 0x2000)
            // Collect BL targets within the caller.
            var blTargets = Set<Int>()
            for scan in stride(from: callerStart, to: callerEnd, by: 4) {
                if let target = jbDecodeBL(at: scan) {
                    blTargets.insert(target)
                }
            }
            // For each BL target within code, look for cmp w0,#imm ; b.ne preceded by a BL.
            for target in blTargets.sorted() {
                guard jbIsInCodeRange(target) else { continue }
                let calleeEnd = findFuncEnd(target, maxSize: 0x200)
                for off in stride(from: target, to: calleeEnd - 4, by: 4) {
                    let insns = disasm.disassemble(in: buffer.data, at: off, count: 2)
                    guard insns.count >= 2 else { continue }
                    let i0 = insns[0], i1 = insns[1]
                    // Must be: cmp w0, #imm followed by b.ne
                    guard i0.mnemonic == "cmp", i1.mnemonic == "b.ne" else { continue }
                    guard let detail0 = i0.aarch64, detail0.operands.count >= 2 else { continue }
                    let op0 = detail0.operands[0]
                    let op1 = detail0.operands[1]
                    guard op0.type == AARCH64_OP_REG, op0.reg == AARCH64_REG_W0 else { continue }
                    guard op1.type == AARCH64_OP_IMM else { continue }
                    // Must be preceded by a BL within 3 instructions.
                    var hasBlBefore = false
                    for back in stride(from: off - 4, through: max(off - 12, target), by: -4) {
                        if jbDecodeBL(at: back) != nil {
                            hasBlBefore = true
                            break
                        }
                    }
                    guard hasBlBefore else { continue }
                    hits.append(off)
                }
            }
        }
        // Uniqueness gate across all callers/callees — ambiguity aborts the patch.
        let uniqueHits = Array(Set(hits)).sorted()
        guard uniqueHits.count == 1 else {
            log(" [-] expected 1 postValidation compare site, found \(uniqueHits.count)")
            return false
        }
        let patchOff = uniqueHits[0]
        emit(patchOff, ARM64.cmpW0W0,
             patchID: "jb.post_validation.cmp_w0_w0",
             virtualAddress: fileOffsetToVA(patchOff),
             description: "cmp w0,w0 [postValidation additional]")
        return true
    }
}

View File

@@ -0,0 +1,67 @@
// KernelJBPatchProcPidinfo.swift JB: NOP the two pid-0 guards in proc_pidinfo.
//
// Python source: scripts/patchers/kernel_jb_patch_proc_pidinfo.py
//
// Reveal: shared _proc_info switch-table anchor function prologue (first 0x80 bytes)
// precise 4-insn pattern: ldr x0,[x0,#0x18] ; cbz x0,fail ; bl ... ; cbz/cbnz wN,fail.
// Patch: NOP the two cbz/cbnz guards (instructions at +4 and +12 of the pattern).
import Capstone
import Foundation
extension KernelJBPatcher {
    /// Bypass the two early pid-0/proc-null guards in proc_pidinfo.
    ///
    /// Matches the precise 4-instruction prologue pattern
    /// `ldr x0,[x0,#0x18] ; cbz x0,fail ; bl ... ; cbz/cbnz wN,fail`
    /// within the first 0x80 bytes of the _proc_info anchor, then NOPs the
    /// two cbz/cbnz guards (pattern offsets +4 and +12).
    @discardableResult
    func patchProcPidinfo() -> Bool {
        log("\n[JB] _proc_pidinfo: NOP pid-0 guard (2 sites)")
        guard let (procInfoFunc, _) = findProcInfoAnchor() else {
            log(" [-] _proc_info function not found")
            return false
        }
        let prologueEnd = min(procInfoFunc + 0x80, buffer.count)
        var guardPair: (a: Int, b: Int)?
        for off in stride(from: procInfoFunc, through: prologueEnd - 16, by: 4) {
            let window = disasm.disassemble(in: buffer.data, at: off, count: 4)
            guard window.count >= 4 else { continue }
            // ldr x0, [x0, #0x18]
            guard window[0].mnemonic == "ldr",
                  window[0].operandString.hasPrefix("x0, [x0, #0x18]") else { continue }
            // cbz x0, <label>
            guard window[1].mnemonic == "cbz",
                  window[1].operandString.hasPrefix("x0, ") else { continue }
            // bl <target>
            guard window[2].mnemonic == "bl" else { continue }
            // cbz/cbnz wN, <label>
            guard window[3].mnemonic == "cbz" || window[3].mnemonic == "cbnz",
                  window[3].operandString.hasPrefix("w") else { continue }
            guardPair = (a: off + 4, b: off + 12) // cbz x0 / cbz-cbnz wN
            break
        }
        guard let (guardA, guardB) = guardPair else {
            log(" [-] precise proc_pidinfo guard pair not found")
            return false
        }
        emit(guardA, ARM64.nop,
             patchID: "jb.proc_pidinfo.nop_guard_a",
             virtualAddress: fileOffsetToVA(guardA),
             description: "NOP [_proc_pidinfo pid-0 guard A]")
        emit(guardB, ARM64.nop,
             patchID: "jb.proc_pidinfo.nop_guard_b",
             virtualAddress: fileOffsetToVA(guardB),
             description: "NOP [_proc_pidinfo pid-0 guard B]")
        return true
    }
}

View File

@@ -0,0 +1,86 @@
// KernelJBPatchProcSecurity.swift JB: stub _proc_security_policy with mov x0,#0; ret.
//
// Python source: scripts/patchers/kernel_jb_patch_proc_security.py
//
// Reveal: find _proc_info by `sub wN,wM,#1 ; cmp wN,#0x21` switch pattern,
// then identify _proc_security_policy among BL targets called 2+ times,
// with function size in [0x40, 0x200].
import Foundation
extension KernelJBPatcher {
    /// Stub _proc_security_policy: mov x0,#0; ret.
    ///
    /// Identification: count BL targets reached from the _proc_info switch
    /// dispatch; _proc_security_policy is the most-called target (>= 2 call
    /// sites) whose function size falls within [0x40, 0x200]. The stub makes
    /// the function return 0 unconditionally.
    @discardableResult
    func patchProcSecurityPolicy() -> Bool {
        log("\n[JB] _proc_security_policy: mov x0,#0; ret")
        guard let (procInfoFunc, switchOff) = findProcInfoAnchor() else {
            log(" [-] _proc_info function not found")
            return false
        }
        // NOTE(review): bounds use the FIRST code range only — assumes the
        // primary kernel text range is first; confirm for split caches.
        let ksStart = codeRanges.first?.start ?? 0
        let ksEnd = codeRanges.first?.end ?? buffer.count
        let procInfoEnd = findFuncEnd(procInfoFunc, maxSize: 0x4000)
        log(" [+] _proc_info at 0x\(String(format: "%X", procInfoFunc)) (size 0x\(String(format: "%X", procInfoEnd - procInfoFunc)))")
        // Count BL targets after switch dispatch within _proc_info.
        var blTargetCounts: [Int: Int] = [:]
        for off in stride(from: switchOff, to: procInfoEnd, by: 4) {
            guard off + 4 <= buffer.count else { break }
            let insn = buffer.readU32(at: off)
            // BL: [31:26] = 0b100101
            guard insn >> 26 == 0b100101 else { continue }
            let imm26 = insn & 0x03FF_FFFF
            // Sign-extend the 26-bit word offset via the Int32 shift pair.
            let signedImm = Int32(bitPattern: imm26 << 6) >> 6
            let target = off + Int(signedImm) * 4
            guard target >= ksStart, target < ksEnd else { continue }
            blTargetCounts[target, default: 0] += 1
        }
        guard !blTargetCounts.isEmpty else {
            log(" [-] no BL targets found in _proc_info switch cases")
            return false
        }
        // Sort by count descending, then by address ascending (to match Python
        // Counter.most_common() insertion-order tie-breaking from the forward scan).
        let sorted = blTargetCounts.sorted {
            if $0.value != $1.value { return $0.value > $1.value }
            return $0.key < $1.key
        }
        for (foff, count) in sorted {
            // Candidates are visited in popularity order; below 2 calls, stop.
            guard count >= 2 else { break }
            let funcEnd = findFuncEnd(foff, maxSize: 0x400)
            let funcSize = funcEnd - foff
            log(" [*] candidate 0x\(String(format: "%X", foff)): \(count) calls, size 0x\(String(format: "%X", funcSize))")
            if funcSize > 0x200 {
                log(" [-] skipped (too large, likely utility)")
                continue
            }
            if funcSize < 0x40 {
                log(" [-] skipped (too small)")
                continue
            }
            log(" [+] identified _proc_security_policy at 0x\(String(format: "%X", foff)) (\(count) calls, size 0x\(String(format: "%X", funcSize)))")
            // Overwrite the prologue: mov x0,#0 then ret — every caller sees "allow".
            emit(foff, ARM64.movX0_0,
                 patchID: "jb.proc_security_policy.mov_x0_0",
                 virtualAddress: fileOffsetToVA(foff),
                 description: "mov x0,#0 [_proc_security_policy]")
            emit(foff + 4, ARM64.ret,
                 patchID: "jb.proc_security_policy.ret",
                 virtualAddress: fileOffsetToVA(foff + 4),
                 description: "ret [_proc_security_policy]")
            return true
        }
        log(" [-] _proc_security_policy not identified among BL targets")
        return false
    }
}

View File

@@ -0,0 +1,208 @@
// KernelJBPatchSandboxExtended.swift JB kernel patch: Extended sandbox hooks bypass
//
// Python source: scripts/patchers/kernel_jb_patch_sandbox_extended.py
//
// Strategy (ops-table retargeting matches upstream patch_fw.py):
// 1. Locate mac_policy_conf via the "Seatbelt sandbox policy" and "Sandbox" strings
// in __DATA_CONST / __DATA segments. The conf struct at offset +32 holds a tagged
// pointer to mac_policy_ops.
// 2. Find the common Sandbox allow stub (mov x0,#0 ; ret) — the highest-address
// instance in sandbox text is the canonical one.
// 3. For each extended hook index (201–316), read the 8-byte tagged pointer from
// ops_table + index * 8, retarget its low 32 bits to allow_stub while preserving
// the high 32 bits (PAC/auth-rebase metadata), and emit the new value.
import Foundation
extension KernelJBPatcher {
    /// Extended sandbox hooks bypass: retarget ops entries to the allow stub.
    ///
    /// Locates the Sandbox `mac_policy_ops` table via `mac_policy_conf`, finds the
    /// common allow stub (`mov x0, #0 ; ret`) in sandbox text, then rewrites each
    /// extended hook's tagged pointer so its low 32 bits target the stub while the
    /// high 32 bits (PAC/auth-rebase metadata) are preserved.
    ///
    /// - Returns: `true` if at least one ops entry was retargeted.
    @discardableResult
    func patchSandboxHooksExtended() -> Bool {
        log("\n[JB] Sandbox extended hooks: retarget ops entries to allow stub")
        guard let opsTable = findSandboxOpsTableViaConf() else {
            return false
        }
        guard let allowStub = findSandboxAllowStub() else {
            log(" [-] common Sandbox allow stub not found")
            return false
        }
        // Extended hook index table (name, ops slot index).
        let hookIndices: [(String, Int)] = [
            ("iokit_check_201", 201),
            ("iokit_check_202", 202),
            ("iokit_check_203", 203),
            ("iokit_check_204", 204),
            ("iokit_check_205", 205),
            ("iokit_check_206", 206),
            ("iokit_check_207", 207),
            ("iokit_check_208", 208),
            ("iokit_check_209", 209),
            ("iokit_check_210", 210),
            ("vnode_check_getattr", 245),
            ("proc_check_get_cs_info", 249),
            ("proc_check_set_cs_info", 250),
            ("proc_check_set_cs_info2", 252),
            ("vnode_check_chroot", 254),
            ("vnode_check_create", 255),
            ("vnode_check_deleteextattr", 256),
            ("vnode_check_exchangedata", 257),
            ("vnode_check_exec", 258),
            ("vnode_check_getattrlist", 259),
            ("vnode_check_getextattr", 260),
            ("vnode_check_ioctl", 261),
            ("vnode_check_link", 264),
            ("vnode_check_listextattr", 265),
            ("vnode_check_open", 267),
            ("vnode_check_readlink", 270),
            ("vnode_check_setattrlist", 275),
            ("vnode_check_setextattr", 276),
            ("vnode_check_setflags", 277),
            ("vnode_check_setmode", 278),
            ("vnode_check_setowner", 279),
            ("vnode_check_setutimes", 280),
            ("vnode_check_stat", 281),
            ("vnode_check_truncate", 282),
            ("vnode_check_unlink", 283),
            ("vnode_check_fsgetpath", 316),
        ]
        var patched = 0
        for (hookName, idx) in hookIndices {
            let entryOff = opsTable + idx * 8
            // Skip entries that would read past the buffer, are unset,
            // or are not auth-tagged pointers.
            guard entryOff + 8 <= buffer.count else { continue }
            let entryRaw = buffer.readU64(at: entryOff)
            guard entryRaw != 0 else { continue }
            guard let newEntry = encodeAuthRebaseLike(origVal: entryRaw, targetOff: allowStub) else {
                continue
            }
            var newBytes = Data(count: 8)
            withUnsafeBytes(of: newEntry.littleEndian) { src in
                newBytes.replaceSubrange(0 ..< 8, with: src)
            }
            emit(entryOff, newBytes,
                 patchID: "sandbox_ext_\(idx)",
                 virtualAddress: nil,
                 description: "ops[\(idx)] -> allow stub [_hook_\(hookName)]")
            patched += 1
        }
        if patched == 0 {
            log(" [-] no extended sandbox hooks retargeted")
            return false
        }
        return true
    }
    // MARK: - Sandbox ops table discovery
    /// Locate the Sandbox mac_policy_ops table via mac_policy_conf.
    ///
    /// Searches __DATA_CONST and __DATA segments for the conf struct:
    ///   [0..7]   tagged ptr -> "Sandbox\0"
    ///   [8..15]  tagged ptr -> "Seatbelt sandbox policy\0"
    ///   [32..39] tagged ptr -> mac_policy_ops table
    private func findSandboxOpsTableViaConf() -> Int? {
        log("\n[*] Finding Sandbox mac_policy_ops via mac_policy_conf...")
        guard let seatbeltOff = buffer.findString("Seatbelt sandbox policy") else {
            log(" [-] Sandbox/Seatbelt strings not found")
            return nil
        }
        // Find "\0Sandbox\0" and return offset of 'S'
        guard let sandboxPattern = "\u{0}Sandbox\u{0}".data(using: .utf8),
              let sandboxRange = buffer.data.range(of: sandboxPattern)
        else {
            log(" [-] Sandbox string not found")
            return nil
        }
        let sandboxOff = sandboxRange.lowerBound + 1 // skip leading NUL
        log(" [*] Sandbox string at foff 0x\(String(format: "%X", sandboxOff)), Seatbelt at 0x\(String(format: "%X", seatbeltOff))")
        // Collect __DATA_CONST and __DATA segment ranges.
        var dataRanges: [(Int, Int)] = []
        for seg in segments {
            if seg.name == "__DATA_CONST" || seg.name == "__DATA", seg.fileSize > 0 {
                let s = Int(seg.fileOffset)
                dataRanges.append((s, s + Int(seg.fileSize)))
            }
        }
        for (dStart, dEnd) in dataRanges {
            var i = dStart
            // 40 bytes = largest field read (conf + 32, 8 bytes wide).
            while i <= dEnd - 40 {
                defer { i += 8 }
                let val = buffer.readU64(at: i)
                // Must not be zero or a tagged (high-bit set) pointer at position [0].
                if val == 0 || (val & (1 << 63)) != 0 { continue }
                // Low 43 bits must point to sandboxOff (auth-rebase chained ptr format).
                guard (val & 0x7FF_FFFF_FFFF) == UInt64(sandboxOff) else { continue }
                let val2 = buffer.readU64(at: i + 8)
                if (val2 & (1 << 63)) != 0 { continue }
                guard (val2 & 0x7FF_FFFF_FFFF) == UInt64(seatbeltOff) else { continue }
                // Offset +32: tagged ptr to mac_policy_ops.
                let valOps = buffer.readU64(at: i + 32)
                if (valOps & (1 << 63)) == 0 {
                    let opsOff = Int(valOps & 0x7FF_FFFF_FFFF)
                    log(" [+] mac_policy_conf at foff 0x\(String(format: "%X", i)), mpc_ops -> 0x\(String(format: "%X", opsOff))")
                    return opsOff
                }
            }
        }
        log(" [-] mac_policy_conf not found")
        return nil
    }
    // MARK: - Allow stub discovery
    /// Find the Sandbox common allow stub: `mov x0, #0 ; ret`.
    ///
    /// Scans sandbox kext text for consecutive MOV_X0_0 + RET pairs and returns
    /// the highest-address hit (matches upstream patch_fw.py choice).
    private func findSandboxAllowStub() -> Int? {
        // Use the Sandbox kext's __TEXT_EXEC.__text range (matches Python self.sandbox_text).
        let sbRange = sandboxTextRange()
        let (sbStart, sbEnd) = (sbRange.start, sbRange.end)
        let movX0_0: UInt32 = 0xD280_0000 // MOV X0, #0 (MOVZ X0, #0)
        let retVal: UInt32 = 0xD65F_03C0 // RET
        var hits: [Int] = []
        var off = sbStart
        // `off + 8 <= sbEnd` (not `off < sbEnd - 8`) so the final 8-byte pair
        // in the range is also considered.
        while off + 8 <= sbEnd {
            if buffer.readU32(at: off) == movX0_0,
               buffer.readU32(at: off + 4) == retVal
            {
                hits.append(off)
            }
            off += 4
        }
        guard let stub = hits.max() else { return nil }
        log(" [+] common Sandbox allow stub at 0x\(String(format: "%X", stub))")
        return stub
    }
    // MARK: - Auth-rebase pointer retargeting
    /// Retarget an auth-rebase chained pointer while preserving PAC metadata.
    ///
    /// Auth-rebase format (high bit set): [63]=1, [62:32]=auth/diversity bits, [31:0]=target.
    /// Replace the low 32 bits with the new target offset.
    /// - Returns: the rewritten 64-bit value, or `nil` if `origVal` is not auth-tagged.
    private func encodeAuthRebaseLike(origVal: UInt64, targetOff: Int) -> UInt64? {
        // Must be a tagged (auth) pointer — bit 63 must be set.
        guard (origVal & (1 << 63)) != 0 else { return nil }
        let highBits = origVal & 0xFFFF_FFFF_0000_0000
        let newLow = UInt64(targetOff) & 0xFFFF_FFFF
        return highBits | newLow
    }
}

View File

@@ -0,0 +1,217 @@
// KernelJBPatchSecureRoot.swift — JB: force SecureRootName policy to return success.
//
// Python source: scripts/patchers/kernel_jb_patch_secure_root.py
//
// Reveal: find functions referencing both "SecureRootName" and "SecureRoot" strings;
// locate the final CSEL that selects between wzr (success) and kIOReturnNotPrivileged;
// verify context: TST+LDRB at [x19,#0x11A] before, STRB+CSET+CMP w0,#0 further back.
// Patch: replace CSEL with MOV <dest>, #0.
import Capstone
import Foundation
extension KernelJBPatcher {
    /// Offset of the SecureRoot match flag inside the dispatch struct
    /// (the byte at [x19, #0x11A] in the anchored codegen).
    private static let secureRootMatchOffset: Int = 0x11A
    /// Force SecureRootName policy return to success in _IOSecureBSDRoot.
    ///
    /// Finds candidate dispatch functions (those referencing both
    /// "SecureRootName" and "SecureRoot"), locates the context-verified CSEL
    /// that selects between wzr (success) and the error value, and rewrites it
    /// to `mov <dest>, #0` so the policy check always reports success.
    /// - Returns: `true` once one verified selector site has been patched.
    @discardableResult
    func patchIoSecureBsdRoot() -> Bool {
        log("\n[JB] _IOSecureBSDRoot: force SecureRootName success")
        let candidates = findSecureRootFunctions()
        guard !candidates.isEmpty else {
            log(" [-] secure-root dispatch function not found")
            return false
        }
        for funcStart in candidates.sorted() {
            let funcEnd = findFuncEnd(funcStart, maxSize: 0x1200)
            guard let (off, destReg) = findSecureRootReturnSite(funcStart: funcStart, funcEnd: funcEnd) else {
                continue
            }
            // Encode mov <destReg>, #0 (always a 32-bit W register)
            guard let patchBytes = encodeMovWReg0(destReg) else { continue }
            emit(off, patchBytes,
                 patchID: "jb.io_secure_bsd_root.zero_return",
                 virtualAddress: fileOffsetToVA(off),
                 description: "mov \(destReg), #0 [_IOSecureBSDRoot SecureRootName allow]")
            // First verified site wins; stop after a single patch.
            return true
        }
        log(" [-] SecureRootName deny-return site not found")
        return false
    }
    // MARK: - Private helpers
    /// Find all functions that reference both "SecureRootName" and "SecureRoot".
    /// Falls back to the "SecureRootName"-only set when the intersection is empty.
    private func findSecureRootFunctions() -> Set<Int> {
        let withName = functionsReferencingString("SecureRootName")
        let withRoot = functionsReferencingString("SecureRoot")
        let common = withName.intersection(withRoot)
        return common.isEmpty ? withName : common
    }
    /// Find all function starts that reference `needle` via ADRP+ADD.
    /// Only exact, whole C-string matches count, so "SecureRoot" does not also
    /// match occurrences inside "SecureRootName".
    private func functionsReferencingString(_ needle: String) -> Set<Int> {
        var result = Set<Int>()
        // Scan all occurrences of the needle in the buffer.
        guard let encoded = needle.data(using: .utf8) else { return result }
        var searchFrom = 0
        while searchFrom < buffer.count {
            guard let range = buffer.data.range(of: encoded, in: searchFrom ..< buffer.count) else { break }
            let pos = range.lowerBound
            // Find null-terminated C string boundary.
            var cstrStart = pos
            while cstrStart > 0, buffer.data[cstrStart - 1] != 0 {
                cstrStart -= 1
            }
            var cstrEnd = pos
            while cstrEnd < buffer.count, buffer.data[cstrEnd] != 0 {
                cstrEnd += 1
            }
            // Only accept if the C string equals the needle exactly.
            if buffer.data[cstrStart ..< cstrEnd] == encoded {
                let refs = findStringRefs(cstrStart)
                for (adrpOff, _) in refs {
                    if let fn = findFunctionStart(adrpOff) {
                        result.insert(fn)
                    }
                }
            }
            searchFrom = pos + 1
        }
        return result
    }
    /// Scan [funcStart, funcEnd) for the CSEL that is the SecureRootName deny/allow selector.
    /// Returns (offset, destRegName) on success.
    /// NOTE(review): the stride stops at funcEnd - 4, so a CSEL in the very last
    /// instruction slot is never considered — presumably intentional (a selector
    /// should be followed by more code); confirm.
    private func findSecureRootReturnSite(funcStart: Int, funcEnd: Int) -> (Int, String)? {
        for off in stride(from: funcStart, to: funcEnd - 4, by: 4) {
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 1)
            guard let insn = insns.first, insn.mnemonic == "csel" else { continue }
            guard let detail = insn.aarch64, detail.operands.count >= 3 else { continue }
            let destOp = detail.operands[0]
            let zeroSrcOp = detail.operands[1]
            let errSrcOp = detail.operands[2]
            guard destOp.type == AARCH64_OP_REG,
                  zeroSrcOp.type == AARCH64_OP_REG,
                  errSrcOp.type == AARCH64_OP_REG else { continue }
            let destName = disasm.registerName(UInt32(destOp.reg.rawValue)) ?? ""
            let zeroName = disasm.registerName(UInt32(zeroSrcOp.reg.rawValue)) ?? ""
            let errName = disasm.registerName(UInt32(errSrcOp.reg.rawValue)) ?? ""
            // Must be: csel wX, wzr, wErr, ne
            guard destName.hasPrefix("w") else { continue }
            guard zeroName == "wzr" || zeroName == "xzr" else { continue }
            // Last operand string should contain "ne"
            // NOTE(review): `hasSuffix(",ne")` is subsumed by `hasSuffix("ne")`;
            // if only the stricter ",ne" form was intended, drop the second test.
            let opStr = insn.operandString.replacingOccurrences(of: " ", with: "")
            guard opStr.hasSuffix(",ne") || opStr.hasSuffix("ne") else { continue }
            // Verify return context: TST + LDRB [x19, #0x11A] walking back.
            guard hasSecureRootReturnContext(off: off, funcStart: funcStart, errRegName: errName) else { continue }
            // Verify compare context: STRB + CSET + CMP w0,#0 walking back.
            guard hasSecureRootCompareContext(off: off, funcStart: funcStart) else { continue }
            return (off, destName)
        }
        return nil
    }
    /// Walk backward from `off` to verify the flag-load and error-build context.
    /// Requires, within 0x40 bytes before the CSEL: a `tst ..., #1`, an earlier
    /// `ldrb ..., [x19, #0x11A]`, and a `mov`/`movk`/`sub` writing the error register.
    /// Assumes the dispatch struct pointer is in x19 here — matches the anchored
    /// codegen; confirm per kernel build.
    private func hasSecureRootReturnContext(off: Int, funcStart: Int, errRegName: String) -> Bool {
        var sawFlagLoad = false
        var sawFlagTest = false
        var sawErrBuild = false
        let lookbackStart = max(funcStart, off - 0x40)
        var probe = off - 4
        while probe >= lookbackStart {
            defer { probe -= 4 }
            let insns = disasm.disassemble(in: buffer.data, at: probe, count: 1)
            guard let ins = insns.first else { continue }
            let ops = ins.operandString.replacingOccurrences(of: " ", with: "")
            if !sawFlagTest, ins.mnemonic == "tst", ops.hasSuffix("#1") {
                sawFlagTest = true
                continue
            }
            // Flag load is only accepted once the (later) flag test has been seen.
            if sawFlagTest, !sawFlagLoad, ins.mnemonic == "ldrb",
               ops.contains("[x19,#0x\(String(format: "%x", Self.secureRootMatchOffset))]")
            {
                sawFlagLoad = true
                continue
            }
            if ins.mnemonic == "mov" || ins.mnemonic == "movk" || ins.mnemonic == "sub",
               writesRegister(ins, regName: errRegName)
            {
                sawErrBuild = true
            }
        }
        return sawFlagLoad && sawFlagTest && sawErrBuild
    }
    /// Walk backward from `off` to verify the match-store + cset,eq + cmp w0,#0 context.
    /// The three markers must appear in backward order (strb, then cset.eq, then
    /// cmp w0,#0) within 0xA0 bytes before the CSEL.
    private func hasSecureRootCompareContext(off: Int, funcStart: Int) -> Bool {
        var sawMatchStore = false
        var sawCsetEq = false
        var sawCmpW0Zero = false
        let lookbackStart = max(funcStart, off - 0xA0)
        var probe = off - 4
        while probe >= lookbackStart {
            defer { probe -= 4 }
            let insns = disasm.disassemble(in: buffer.data, at: probe, count: 1)
            guard let ins = insns.first else { continue }
            let ops = ins.operandString.replacingOccurrences(of: " ", with: "")
            if !sawMatchStore, ins.mnemonic == "strb",
               ops.contains("[x19,#0x\(String(format: "%x", Self.secureRootMatchOffset))]")
            {
                sawMatchStore = true
                continue
            }
            if sawMatchStore, !sawCsetEq, ins.mnemonic == "cset", ops.hasSuffix(",eq") {
                sawCsetEq = true
                continue
            }
            if sawMatchStore, sawCsetEq, !sawCmpW0Zero, ins.mnemonic == "cmp",
               ops.hasPrefix("w0,#0")
            {
                sawCmpW0Zero = true
                break
            }
        }
        return sawMatchStore && sawCsetEq && sawCmpW0Zero
    }
    /// Return true if the instruction writes to `regName` as its first operand.
    private func writesRegister(_ ins: Instruction, regName: String) -> Bool {
        guard let detail = ins.aarch64, !detail.operands.isEmpty else { return false }
        let first = detail.operands[0]
        guard first.type == AARCH64_OP_REG else { return false }
        return (disasm.registerName(UInt32(first.reg.rawValue)) ?? "") == regName
    }
    /// Encode `mov <wReg>, #0` (MOVZ Wd, #0). E.g. "w22" MOVZ W22, #0.
    /// Returns nil for non-numeric W registers (e.g. "wzr") or out-of-range indices.
    private func encodeMovWReg0(_ regName: String) -> Data? {
        guard regName.hasPrefix("w"), let numStr = regName.dropFirst().isEmpty ? nil : String(regName.dropFirst()),
              let rd = UInt32(numStr), rd < 32 else { return nil }
        // MOVZ Wd, #0 = 0x52800000 | rd
        let insn: UInt32 = 0x5280_0000 | rd
        return ARM64.encodeU32(insn)
    }
}

View File

@@ -0,0 +1,98 @@
// KernelJBPatchSharedRegion.swift — JB kernel patch: Shared region map bypass
//
// Python source: scripts/patchers/kernel_jb_patch_shared_region.py
import Capstone
import Foundation
extension KernelJBPatcher {
    /// Force `cmp x0, x0` in the root-vs-preboot gate of
    /// `_shared_region_map_and_slide_setup`.
    ///
    /// Anchor: the `/private/preboot/Cryptexes` string — find the function that
    /// contains a reference to it, then locate the unique
    /// `cmp Xm, Xn ; b.eq (forward) ; str xzr, ...` sequence just before the
    /// string reference and overwrite the compare.
    @discardableResult
    func patchSharedRegionMap() -> Bool {
        log("\n[JB] _shared_region_map_and_slide_setup: upstream cmp x0,x0")
        guard let anchorOff = buffer.findString("/private/preboot/Cryptexes") else {
            log(" [-] Cryptexes string not found")
            return false
        }
        // Resolve the containing function from the first code reference.
        let stringRefs = findStringRefs(anchorOff)
        guard let firstRef = stringRefs.first else {
            log(" [-] no code refs to Cryptexes string")
            return false
        }
        guard let funcStart = findFunctionStart(firstRef.adrpOff) else {
            log(" [-] function not found via Cryptexes anchor")
            return false
        }
        let funcEnd = findFuncEnd(funcStart, maxSize: 0x2000)
        // Collect a candidate patch site for every ADRP reference that lies
        // inside the function.
        var candidates: [Int] = []
        for ref in stringRefs where ref.adrpOff >= funcStart && ref.adrpOff < funcEnd {
            if let site = findUpstreamRootMountCmp(funcStart: funcStart, strRefOff: ref.adrpOff) {
                candidates.append(site)
            }
        }
        // The gate must be found exactly once; anything else is ambiguous.
        guard candidates.count == 1, let siteOff = candidates.first else {
            log(" [-] upstream root-vs-preboot cmp gate not found uniquely (found \(candidates.count))")
            return false
        }
        emit(siteOff, ARM64.cmpX0X0,
             patchID: "kernelcache_jb.shared_region_map",
             virtualAddress: fileOffsetToVA(siteOff),
             description: "cmp x0,x0 [_shared_region_map_and_slide_setup]")
        return true
    }
    // MARK: - Private helpers
    /// Scan a small window (at most 9 instructions) ending at `strRefOff` for:
    ///   cmp Xm, Xn
    ///   b.eq #forward
    ///   str xzr, [...]
    /// Returns the offset of the `cmp` on success.
    private func findUpstreamRootMountCmp(funcStart: Int, strRefOff: Int) -> Int? {
        let windowStart = max(funcStart, strRefOff - 0x24)
        let windowEnd = min(strRefOff, windowStart + 0x24)
        guard windowStart < windowEnd else { return nil }
        for probe in stride(from: windowStart, to: windowEnd, by: 4) {
            let triple = disasm.disassemble(in: buffer.data, at: probe, count: 3)
            guard triple.count >= 3 else { continue }
            let cmp = triple[0]
            let beq = triple[1]
            let follow = triple[2]
            guard cmp.mnemonic == "cmp", beq.mnemonic == "b.eq" else { continue }
            // Both compare operands must be plain registers.
            guard let cmpOperands = cmp.aarch64?.operands, cmpOperands.count == 2,
                  cmpOperands[0].type == AARCH64_OP_REG,
                  cmpOperands[1].type == AARCH64_OP_REG
            else { continue }
            // The conditional branch must jump forward.
            guard let beqOperands = beq.aarch64?.operands, beqOperands.count == 1,
                  beqOperands[0].type == AARCH64_OP_IMM,
                  Int(beqOperands[0].imm) > Int(beq.address)
            else { continue }
            // Immediately followed by `str xzr, [...]`.
            guard follow.mnemonic == "str",
                  follow.operandString.contains("xzr")
            else { continue }
            return probe
        }
        return nil
    }
}

View File

@@ -0,0 +1,200 @@
// KernelJBPatchSpawnPersona.swift — JB kernel patch: Spawn validate persona bypass
//
// Python source: scripts/patchers/kernel_jb_patch_spawn_persona.py
import Capstone
import Foundation
extension KernelJBPatcher {
    /// NOP the upstream dual-CBZ bypass in the persona helper.
    ///
    /// 1. Recover the outer spawn policy function from
    ///    `com.apple.private.spawn-panic-crash-behavior`.
    /// 2. Enumerate its local BL callees.
    /// 3. Choose the unique small callee whose local CFG matches:
    ///    `ldr [arg,#8] ; cbz deny ; ldr [arg,#0xc] ; cbz deny`.
    /// 4. NOP both `cbz` guards.
    ///
    /// - Returns: `true` when both guard CBZs were NOPed.
    @discardableResult
    func patchSpawnValidatePersona() -> Bool {
        log("\n[JB] _spawn_validate_persona: upstream dual-CBZ bypass")
        guard let strOff = buffer.findString("com.apple.private.spawn-panic-crash-behavior") else {
            log(" [-] spawn entitlement anchor not found")
            return false
        }
        let refs = findStringRefs(strOff)
        guard !refs.isEmpty, let anchorFunc = findFunctionStart(refs[0].adrpOff) else {
            log(" [-] spawn entitlement anchor not found")
            return false
        }
        let anchorEnd = findFuncEnd(anchorFunc, maxSize: 0x4000)
        guard let sites = findUpstreamPersonaCbzSites(anchorStart: anchorFunc, anchorEnd: anchorEnd) else {
            log(" [-] upstream persona helper not found from string anchor")
            return false
        }
        let (firstCbz, secondCbz) = sites
        let va1 = fileOffsetToVA(firstCbz)
        let va2 = fileOffsetToVA(secondCbz)
        emit(firstCbz, ARM64.nop,
             patchID: "kernelcache_jb.spawn_validate_persona.cbz1",
             virtualAddress: va1,
             description: "NOP [_spawn_validate_persona pid-slot guard]")
        emit(secondCbz, ARM64.nop,
             patchID: "kernelcache_jb.spawn_validate_persona.cbz2",
             virtualAddress: va2,
             description: "NOP [_spawn_validate_persona persona-slot guard]")
        return true
    }
    // MARK: - Private helpers
    /// Enumerate BL callees of the anchor function and return the CBZ-site pair
    /// of the unique callee matching the persona-helper shape, or nil when the
    /// match is missing or ambiguous.
    private func findUpstreamPersonaCbzSites(anchorStart: Int, anchorEnd: Int) -> (Int, Int)? {
        var matches: [(Int, Int)] = []
        var seen = Set<Int>()
        for off in stride(from: anchorStart, to: anchorEnd, by: 4) {
            guard let blTarget = decodeBL(at: off), !seen.contains(blTarget) else { continue }
            guard jbIsInCodeRange(blTarget) else { continue }
            seen.insert(blTarget)
            // The persona helper is small; bound the callee scan to 0x400 bytes.
            let calleeEnd = findFuncEnd(blTarget, maxSize: 0x400)
            if let sites = matchPersonaHelper(start: blTarget, end: calleeEnd) {
                matches.append(sites)
            }
        }
        if matches.count == 1 { return matches[0] }
        if !matches.isEmpty {
            let list = matches.map { String(format: "0x%X/0x%X", $0.0, $0.1) }.joined(separator: ", ")
            log(" [-] ambiguous persona helper candidates: \(list)")
        }
        return nil
    }
    /// Match the upstream shape:
    ///   ldr wA, [x0, #8]
    ///   cbz wA, deny
    ///   ldr wB, [x0, #0xc]
    ///   cbz wB, deny
    ///   mov x?, #0
    ///   ldr x?, [x?, #0x490]
    /// - Returns: the file offsets of the two CBZ instructions when the shape
    ///   matches exactly once in [start, end); nil otherwise.
    private func matchPersonaHelper(start: Int, end: Int) -> (Int, Int)? {
        var hits: [(Int, Int)] = []
        var off = start
        while off + 0x14 < end {
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 6)
            guard insns.count >= 6 else { off += 4; continue }
            let i0 = insns[0], i1 = insns[1], i2 = insns[2]
            let i3 = insns[3], i4 = insns[4], i5 = insns[5]
            // ldr wA, [base, #8]
            guard isLdrMem(i0, disp: 8) else { off += 4; continue }
            guard let i0ops = i0.aarch64?.operands, i0ops.count >= 2 else { off += 4; continue }
            let loadedReg0 = i0ops[0].reg
            let baseReg = i0ops[1].mem.base
            // cbz wA, deny
            guard isCbzWSameReg(i1, reg: loadedReg0) else { off += 4; continue }
            // ldr wB, [base, #0xc]
            guard isLdrMemSameBase(i2, base: baseReg, disp: 0xC) else { off += 4; continue }
            guard let i2ops = i2.aarch64?.operands, !i2ops.isEmpty else { off += 4; continue }
            let loadedReg2 = i2ops[0].reg
            // cbz wB, deny (same deny target)
            guard isCbzWSameReg(i3, reg: loadedReg2) else { off += 4; continue }
            guard let i1ops = i1.aarch64?.operands, i1ops.count == 2,
                  let i3ops = i3.aarch64?.operands, i3ops.count == 2,
                  i1ops[1].type == AARCH64_OP_IMM, i3ops[1].type == AARCH64_OP_IMM,
                  i1ops[1].imm == i3ops[1].imm
            else { off += 4; continue }
            let denyTarget = Int(i1ops[1].imm)
            // Deny block must look like `mov w0, #1`
            guard looksLikeErrnoReturn(target: denyTarget, value: 1) else { off += 4; continue }
            // mov x?, #0
            guard isMovXImmZero(i4) else { off += 4; continue }
            // ldr x?, [x?, #0x490]
            guard isLdrMem(i5, disp: 0x490) else { off += 4; continue }
            hits.append((Int(i1.address), Int(i3.address)))
            off += 4
        }
        return hits.count == 1 ? hits[0] : nil
    }
    /// True if `insn` is `ldr <reg>, [<base>, #disp]`.
    private func isLdrMem(_ insn: Instruction, disp: Int32) -> Bool {
        guard insn.mnemonic == "ldr",
              let ops = insn.aarch64?.operands, ops.count >= 2,
              ops[0].type == AARCH64_OP_REG,
              ops[1].type == AARCH64_OP_MEM,
              ops[1].mem.disp == disp
        else { return false }
        return true
    }
    /// True if `insn` is an `isLdrMem` match whose memory base register is `base`.
    private func isLdrMemSameBase(_ insn: Instruction, base: aarch64_reg, disp: Int32) -> Bool {
        guard isLdrMem(insn, disp: disp),
              let ops = insn.aarch64?.operands, ops.count >= 2,
              ops[1].mem.base == base
        else { return false }
        return true
    }
    /// True if `insn` is `cbz w<reg>, #imm` testing exactly `reg`.
    private func isCbzWSameReg(_ insn: Instruction, reg: aarch64_reg) -> Bool {
        guard insn.mnemonic == "cbz",
              let ops = insn.aarch64?.operands, ops.count == 2,
              ops[0].type == AARCH64_OP_REG, ops[0].reg == reg,
              ops[1].type == AARCH64_OP_IMM
        else { return false }
        // Must be a w-register
        let name = insn.operandString.components(separatedBy: ",").first?
            .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
        return name.hasPrefix("w")
    }
    /// True if `insn` is `mov x<n>, #0`.
    private func isMovXImmZero(_ insn: Instruction) -> Bool {
        guard insn.mnemonic == "mov",
              let ops = insn.aarch64?.operands, ops.count == 2,
              ops[0].type == AARCH64_OP_REG,
              ops[1].type == AARCH64_OP_IMM, ops[1].imm == 0
        else { return false }
        let name = insn.operandString.components(separatedBy: ",").first?
            .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
        return name.hasPrefix("x")
    }
    /// True if the first instruction at `target` is `mov w<n>, #value`
    /// (the deny block setting an errno-style return).
    private func looksLikeErrnoReturn(target: Int, value: Int64) -> Bool {
        guard target >= 0, target + 4 <= buffer.count else { return false }
        let insns = disasm.disassemble(in: buffer.data, at: target, count: 1)
        guard let insn = insns.first else { return false }
        return isMovWImmValue(insn, imm: value)
    }
    /// True if `insn` is `mov w<n>, #imm` with the given immediate.
    private func isMovWImmValue(_ insn: Instruction, imm: Int64) -> Bool {
        guard insn.mnemonic == "mov",
              let ops = insn.aarch64?.operands, ops.count == 2,
              ops[0].type == AARCH64_OP_REG,
              ops[1].type == AARCH64_OP_IMM, ops[1].imm == imm
        else { return false }
        let name = insn.operandString.components(separatedBy: ",").first?
            .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
        return name.hasPrefix("w")
    }
    // MARK: - Instruction decode helpers
    /// Decode a BL instruction at `off`, returning the target file offset or nil.
    private func decodeBL(at off: Int) -> Int? {
        guard off + 4 <= buffer.count else { return nil }
        let insn = buffer.readU32(at: off)
        // BL: top six bits are 100101.
        guard insn >> 26 == 0b100101 else { return nil }
        let imm26 = insn & 0x03FF_FFFF
        // Sign-extend the 26-bit word offset.
        let signedImm = Int32(bitPattern: imm26 << 6) >> 6
        return off + Int(signedImm) * 4
    }
}

View File

@@ -0,0 +1,363 @@
// KernelJBPatchSyscallmask.swift — JB kernel patch: syscallmask C22 apply-to-proc
//
// Python source: scripts/patchers/kernel_jb_patch_syscallmask.py
//
// Strategy (retargeted C22): Hijack the low-level syscallmask apply wrapper.
// 1. Replace the pre-setter helper BL with `mov x17, x0` (save RO selector).
// 2. Replace the final tail `B setter` with `B cave`.
// 3. The cave:
// a. 0x100 bytes of 0xFF (all-allow mask blob).
// b. Code that calls zalloc_ro_mut (the mutation helper) to overwrite
// the process's syscall mask with the all-0xFF blob.
// c. Restores args and branches through to the normal setter.
//
// This preserves the normal setter path so all three mask types
// (unix syscall, mach trap, kernel MIG) go through their regular
// validation, but with an all-0xFF effective mask.
import Foundation
extension KernelJBPatcher {
// MARK: - Constants
private static let syscallmaskFFBlobSize = 0x100
// MARK: - Entry Point
    /// Retargeted C22 patch: syscallmask apply to proc.
    ///
    /// Hijacks the low-level syscallmask apply wrapper:
    ///   1. the pre-setter helper BL becomes `mov x17, x0` (saves the RO selector),
    ///   2. the final tail branch is redirected into a code cave that overwrites
    ///      the process mask with an all-0xFF blob via the mutation helper, then
    ///      falls through to the normal setter.
    /// - Returns: `true` when all three patch sites (BL, tail branch, cave) were emitted.
    @discardableResult
    func patchSyscallmaskApplyToProc() -> Bool {
        log("\n[JB] _syscallmask_apply_to_proc: retargeted upstream C22")
        // 1. Find the low-level apply wrapper.
        guard let funcOff = findSyscallmaskApplyFunc() else {
            log(" [-] syscallmask apply wrapper not found (fail-closed)")
            return false
        }
        // 2. Find the pre-setter helper BL site.
        guard let callOff = findSyscallmaskInjectBL(funcOff: funcOff) else {
            log(" [-] helper BL site not found in syscallmask wrapper")
            return false
        }
        // 3. Find the final tail branch into the setter core.
        guard let (branchOff, setterOff) = findSyscallmaskTailBranch(funcOff: funcOff) else {
            log(" [-] setter tail branch not found in syscallmask wrapper")
            return false
        }
        // 4. Resolve the mutation helper (structural: next function after helper's containing func).
        // -1 is a sentinel for "BL target undecodable"; resolveSyscallmaskMutator rejects it.
        let helperTarget = jbDecodeBL(at: callOff) ?? -1
        guard let mutatorOff = resolveSyscallmaskMutator(funcOff: funcOff, helperTarget: helperTarget) else {
            log(" [-] syscallmask mutation helper not resolved structurally")
            return false
        }
        // 5. Allocate cave: 0x100 blob + code.
        let caveSize = Self.syscallmaskFFBlobSize + 0x80
        guard let caveOff = findCodeCave(size: caveSize) else {
            log(" [-] no executable code cave found for C22 (\(caveSize) bytes)")
            return false
        }
        // 6. Build cave.
        guard let (caveBytes, codeOff) = buildSyscallmaskCave(
            caveOff: caveOff,
            zallocOff: mutatorOff,
            setterOff: setterOff
        ) else {
            log(" [-] failed to encode C22 cave branches")
            return false
        }
        // 7. Patch: redirect tail branch to cave entry (code section, not blob).
        guard let branchToCave = encodeB(from: branchOff, to: codeOff) else {
            log(" [-] tail branch cannot reach C22 cave")
            return false
        }
        // mov x17, x0 (save RO selector that was in x0 before the pre-setter BL)
        // NOTE(review): the cave later reads x17 — assumes nothing between this
        // site and the cave entry clobbers x17; confirm against the wrapper body.
        let movX17X0: UInt32 = 0xAA00_03F1 // ORR X17, XZR, X0 (canonical MOV X17, X0)
        emit(callOff, ARM64.encodeU32(movX17X0),
             patchID: "jb.syscallmask.save_selector",
             description: "mov x17,x0 [syscallmask C22 save RO selector]")
        emit(branchOff, branchToCave,
             patchID: "jb.syscallmask.tail_redirect",
             description: "b cave [syscallmask C22 mutate mask then setter]")
        emit(caveOff, caveBytes,
             patchID: "jb.syscallmask.c22_cave",
             description: "syscallmask C22 cave (ff blob 0x\(String(format: "%X", Self.syscallmaskFFBlobSize)) + structural mutator + setter tail)")
        return true
    }
// MARK: - Function Finders
/// Find the high-level apply manager via its three error strings,
/// then find the low-level wrapper it calls three times.
private func findSyscallmaskApplyFunc() -> Int? {
// Try symbol lookup first
for name in ["_syscallmask_apply_to_proc", "_proc_apply_syscall_masks"] {
if let off = resolveSymbol(name) { return off }
}
// Find manager via error strings
guard let managerOff = findSyscallmaskManagerFunc() else { return nil }
// Find the callee that appears 3+ times in the manager with w1 = 0, 1, 2
return findSyscallmaskWrapperInManager(managerOff: managerOff)
}
/// Locate the high-level apply manager by its three error log strings.
private func findSyscallmaskManagerFunc() -> Int? {
let errorStrings = [
"failed to apply unix syscall mask",
"failed to apply mach trap mask",
"failed to apply kernel MIG routine mask",
]
var candidates: Set<Int>? = nil
for str in errorStrings {
guard let strOff = buffer.findString(str) else { return nil }
let refs = findStringRefs(strOff)
let funcStarts = Set(refs.compactMap { findFunctionStart($0.adrpOff) })
guard !funcStarts.isEmpty else { return nil }
if let c = candidates {
candidates = c.intersection(funcStarts)
} else {
candidates = funcStarts
}
guard let c = candidates, !c.isEmpty else { return nil }
}
return candidates?.min()
}
/// Find the wrapper callee that appears 3+ times and is called with w1=0,1,2.
private func findSyscallmaskWrapperInManager(managerOff: Int) -> Int? {
let funcEnd = findFuncEnd(managerOff, maxSize: 0x300)
var targetCalls: [Int: [Int]] = [:]
var off = managerOff
while off < funcEnd {
if let target = jbDecodeBL(at: off) {
targetCalls[target, default: []].append(off)
}
off += 4
}
// Find callee appearing 3+ times, called with distinct w1 immediates 0,1,2
for (target, calls) in targetCalls.sorted(by: { $0.value.count > $1.value.count }) {
guard calls.count >= 3 else { continue }
let whiches = Set(calls.compactMap { callOff in
extractW1ImmNearCall(funcOff: managerOff, callOff: callOff)
})
if whiches.isSuperset(of: [0, 1, 2]) { return target }
}
return nil
}
/// Best-effort: look back up to 0x20 bytes from a BL for the last `mov w1, #imm`.
private func extractW1ImmNearCall(funcOff: Int, callOff: Int) -> Int? {
let scanStart = max(funcOff, callOff - 0x20)
var off = callOff - 4
while off >= scanStart {
guard let insn = disasAt(off) else { off -= 4; continue }
let op = insn.operandString.replacingOccurrences(of: " ", with: "")
if insn.mnemonic == "mov", op.hasPrefix("w1,#") {
let imm = String(op.dropFirst(4))
if imm.hasPrefix("0x") || imm.hasPrefix("0X") {
if let v = Int(imm.dropFirst(2), radix: 16) { return v }
} else {
if let v = Int(imm) { return v }
}
}
off -= 4
}
return nil
}
/// Find the pre-setter helper BL site in the apply wrapper.
/// Python: scan forward from func start, after the first `cbz x2`, return the next BL.
private func findSyscallmaskInjectBL(funcOff: Int) -> Int? {
let funcEnd = findFuncEnd(funcOff, maxSize: 0x280)
let scanEnd = min(funcOff + 0x80, funcEnd)
var seenCbzX2 = false
var off = funcOff
while off < scanEnd {
guard let insn = disasAt(off) else { off += 4; continue }
let op = insn.operandString.replacingOccurrences(of: " ", with: "")
if insn.mnemonic == "cbz", op.hasPrefix("x2,") {
seenCbzX2 = true
} else if seenCbzX2, jbDecodeBL(at: off) != nil {
return off
}
off += 4
}
return nil
}
    /// Find the final tail B into the setter core (last unconditional branch in the func).
    ///
    /// Walks backward from the end of the wrapper looking for an unconditional `B`
    /// whose target lies inside kernel text.
    /// - Returns: `(branch offset, branch target)` or `nil` if none is found.
    private func findSyscallmaskTailBranch(funcOff: Int) -> (Int, Int)? {
        let funcEnd = findFuncEnd(funcOff, maxSize: 0x280)
        var off = funcEnd - 4
        while off >= funcOff {
            // Check for unconditional B
            let val = buffer.readU32(at: off)
            if (val & 0xFC00_0000) == 0x1400_0000 {
                // Sign-extend the 26-bit word offset to compute the target.
                let imm26 = val & 0x3FFFFFF
                let signedImm = Int32(bitPattern: imm26 << 6) >> 6
                let target = off + Int(signedImm) * 4
                // Target must land inside kernel text (fail closed if range unknown).
                let inText = kernTextRange.map { target >= $0.0 && target < $0.1 } ?? false
                // NOTE(review): the `jbDecodeBL(at:) == nil` guard looks redundant —
                // the opcode mask above only matches B (top bits 000101), never BL
                // (100101). Confirm jbDecodeBL decodes BL only before removing it.
                if inText, jbDecodeBL(at: off) == nil {
                    return (off, target)
                }
            }
            off -= 4
        }
        return nil
    }
/// Resolve the mutation helper: the function immediately following the helper's
/// containing function in text. It must start with PACIBSP or BTI.
private func resolveSyscallmaskMutator(funcOff _: Int, helperTarget: Int) -> Int? {
guard helperTarget >= 0 else { return nil }
guard let helperFunc = findFunctionStart(helperTarget) else { return nil }
let mutatorOff = findFuncEnd(helperFunc, maxSize: 0x200)
guard mutatorOff > helperTarget, mutatorOff < helperFunc + 0x200 else { return nil }
guard mutatorOff + 4 <= buffer.count else { return nil }
let headInsn = buffer.readU32(at: mutatorOff)
guard headInsn == ARM64.pacibspU32 || headInsn == 0xD503_241F /* bti */ else { return nil }
return mutatorOff
}
// MARK: - C22 Cave Builder
/// Build the C22 cave: 0x100 0xFF-blob + code section.
///
/// Returns (caveBytes, codeStartFoff) or nil on branch encoding failure.
///
/// The code section contract:
/// x0 = struct proc* (RO object, from original arg)
/// x1 = mask_type (original arg 1)
/// x2 = mask_ptr (original arg 2, the kernel mask buffer to overwrite)
/// x3 = mask_len (original arg 3)
/// x17 = original x0 (RO zalloc selector, saved by the injected `mov x17, x0`)
///
/// Cave code layout (27 instructions):
/// 0: cbz x2, #exit (skip if mask_ptr null)
/// 1: sub sp, sp, #0x40
/// 2: stp x19, x20, [sp, #0x10]
/// 3: stp x21, x22, [sp, #0x20]
/// 4: stp x29, x30, [sp, #0x30]
/// 5: mov x19, x0
/// 6: mov x20, x1
/// 7: mov x21, x2
/// 8: mov x22, x3
/// 9: mov x8, #8 (word size)
/// 10: mov x0, x17 (RO zalloc selector)
/// 11: mov x1, x21 (dst = mask_ptr)
/// 12: mov x2, #0 (src offset = 0)
/// 13: adr x3, #blobDelta (src = cave blob start)
/// 14: udiv x4, x22, x8 (x4 = mask_len / 8)
/// 15: msub x10, x4, x8, x22 (x10 = mask_len % 8)
/// 16: cbz x10, #8 (if exact multiple, skip +1)
/// 17: add x4, x4, #1 (round up)
/// 18: bl mutatorOff
/// 19: mov x0, x19
/// 20: mov x1, x20
/// 21: mov x2, x21
/// 22: mov x3, x22
/// 23: ldp x19, x20, [sp, #0x10]
/// 24: ldp x21, x22, [sp, #0x20]
/// 25: ldp x29, x30, [sp, #0x30]
/// 26: add sp, sp, #0x40
/// 27: b setterOff (tail-call into setter)
private func buildSyscallmaskCave(
    caveOff: Int,
    zallocOff: Int,
    setterOff: Int
) -> (Data, Int)? {
    // Emit the syscall-mask code cave: an all-0xFF mask blob followed by a
    // 28-instruction thunk. The thunk zalloc-copies the blob over the
    // caller's mask buffer (rounding mask_len up to whole 8-byte words) and
    // then tail-calls the original setter with its original arguments.
    //
    // - caveOff:   file offset where the cave begins (blob is placed here)
    // - zallocOff: file offset of the RO-zalloc mutator (BL target)
    // - setterOff: file offset of the original setter (tail-call target)
    // Returns (blob + thunk bytes, file offset of the first thunk
    // instruction), or nil when a branch target is out of B/BL range.
    let blobSize = Self.syscallmaskFFBlobSize
    let codeOff = caveOff + blobSize
    var code: [Data] = []
    // 0: cbz x2, #+0x6c — mask_len == 0: skip straight to the tail-call
    //    `b setterOff` at instruction 27 (27 * 4 = 0x6C).
    //    imm19 = 27 -> (27 << 5) | Rt(x2) = 0x362.
    //    FIX: was 0xB400_0622, which encodes #+0xC4 and jumps past the thunk.
    code.append(ARM64.encodeU32(0xB400_0362)) // cbz x2, #+0x6c
    // 1: sub sp, sp, #0x40
    code.append(ARM64.encodeU32(0xD101_03FF)) // sub sp, sp, #0x40
    // 2: stp x19, x20, [sp, #0x10] — Rt2 (bits 14:10) must be x20 (0x5000).
    //    FIX: was 0xA901_4FF3 = stp x19, x19 — x20 was never saved.
    code.append(ARM64.encodeU32(0xA901_53F3)) // stp x19, x20, [sp, #0x10]
    // 3: stp x21, x22, [sp, #0x20]
    //    FIX: was 0xA902_57F5 = stp x21, x21 — x22 was never saved.
    code.append(ARM64.encodeU32(0xA902_5BF5)) // stp x21, x22, [sp, #0x20]
    // 4: stp x29, x30, [sp, #0x30]
    code.append(ARM64.encodeU32(0xA903_7BFD)) // stp x29, x30, [sp, #0x30]
    // 5-8: stash the four setter arguments in callee-saved registers.
    code.append(ARM64.encodeU32(0xAA00_03F3)) // mov x19, x0
    code.append(ARM64.encodeU32(0xAA01_03F4)) // mov x20, x1
    code.append(ARM64.encodeU32(0xAA02_03F5)) // mov x21, x2
    code.append(ARM64.encodeU32(0xAA03_03F6)) // mov x22, x3
    // 9: word size used by the len -> word-count conversion below.
    code.append(ARM64.encodeU32(0xD280_0108)) // movz x8, #8
    // 10: x17 carries the RO zalloc selector on entry.
    code.append(ARM64.encodeU32(0xAA11_03E0)) // mov x0, x17
    // 11: dst = mask pointer (3rd setter argument).
    code.append(ARM64.encodeU32(0xAA15_03E1)) // mov x1, x21
    // 12: src offset = 0.
    code.append(ARM64.encodeU32(0xD280_0002)) // movz x2, #0
    // 13: adr x3, #blobDelta (blob is at caveOff, code is at codeOff)
    let adrOff = codeOff + code.count * 4
    let blobDelta = caveOff - adrOff
    // ADR x3, #delta: sf=0, op=1, immlo = delta & 3, immhi = (delta >> 2) & 0x7FFFF
    // Encode: [31]=0, [30:29]=immlo, [28:24]=10000, [23:5]=immhi, [4:0]=Rd(3)
    let adrImm = blobDelta
    let immlo = UInt32(bitPattern: Int32(adrImm)) & 0x3
    let immhi = (UInt32(bitPattern: Int32(adrImm)) >> 2) & 0x7FFFF
    let adrInsn: UInt32 = (immlo << 29) | (0b10000 << 24) | (immhi << 5) | 3
    code.append(ARM64.encodeU32(adrInsn))
    // 14: x4 = mask_len / 8 (whole words).
    code.append(ARM64.encodeU32(0x9AC8_0AC4)) // udiv x4, x22, x8
    // 15: x10 = mask_len % 8 = mask_len - x4 * 8.
    //     MSUB requires o0 (bit 15) set and Ra = x22 in bits [14:10].
    //     FIX: was 0x9B08_5C8A = madd x10, x4, x8, x23 (wrong op, wrong Ra).
    code.append(ARM64.encodeU32(0x9B08_D88A)) // msub x10, x4, x8, x22
    // 16: exact multiple of 8 -> skip the round-up.
    code.append(ARM64.encodeU32(0xB400_004A)) // cbz x10, #+8
    // 17: round the word count up.
    code.append(ARM64.encodeU32(0x9100_0484)) // add x4, x4, #1
    // 18: call the RO-zalloc mutator to copy the blob over the mask.
    let blOff = codeOff + code.count * 4
    guard let blMutator = encodeBL(from: blOff, to: zallocOff) else { return nil }
    code.append(blMutator)
    // 19-22: restore the original setter arguments.
    code.append(ARM64.encodeU32(0xAA13_03E0)) // mov x0, x19
    code.append(ARM64.encodeU32(0xAA14_03E1)) // mov x1, x20
    code.append(ARM64.encodeU32(0xAA15_03E2)) // mov x2, x21
    code.append(ARM64.encodeU32(0xAA16_03E3)) // mov x3, x22
    // 23: FIX: was 0xA941_4FF3 = ldp x19, x19 (same-register LDP is
    //     CONSTRAINED UNPREDICTABLE and x20 was never restored).
    code.append(ARM64.encodeU32(0xA941_53F3)) // ldp x19, x20, [sp, #0x10]
    // 24: FIX: was 0xA942_57F5 = ldp x21, x21.
    code.append(ARM64.encodeU32(0xA942_5BF5)) // ldp x21, x22, [sp, #0x20]
    // 25: ldp x29, x30, [sp, #0x30]
    code.append(ARM64.encodeU32(0xA943_7BFD)) // ldp x29, x30, [sp, #0x30]
    // 26: add sp, sp, #0x40
    code.append(ARM64.encodeU32(0x9101_03FF)) // add sp, sp, #0x40
    // 27: tail-call into the original setter.
    let branchBackOff = codeOff + code.count * 4
    guard let branchBack = encodeB(from: branchBackOff, to: setterOff) else { return nil }
    code.append(branchBack)
    let codeBytes = code.reduce(Data(), +)
    let blobBytes = Data(repeating: 0xFF, count: blobSize)
    return (blobBytes + codeBytes, codeOff)
}
}

View File

@@ -0,0 +1,160 @@
// KernelJBPatchTaskConversion.swift JB kernel patch: Task conversion eval bypass
//
// Python source: scripts/patchers/kernel_jb_patch_task_conversion.py
//
// Strategy (fast raw scanner):
// Locate the unique guard site in _task_conversion_eval_internal that performs:
// ADRP Xn, <global> ; [off - 8] loads global task-conversion table
// LDR Xn, [Xn] ; [off - 4] dereferences it
// CMP Xn, X0 ; [off + 0] compare task pointer against X0
// B.EQ <skip1> ; [off + 4]
// CMP Xn, X1 ; [off + 8] compare against X1
// B.EQ <skip2> ; [off + 12]
// MOV X19, X0 ; [off + 16]
// MOV X0, X1 ; [off + 20]
// BL <callee> ; [off + 24]
// CBZ/CBNZ W0, ... ; [off + 28]
// Patch: replace CMP Xn, X0 with CMP XZR, XZR so the equality check always passes.
import Foundation
extension KernelJBPatcher {
    /// Task conversion eval bypass: patch the guard CMP to always be equal.
    ///
    /// Requires exactly one matching guard site inside the first code range;
    /// zero or multiple matches abort without patching so an ambiguous scan
    /// never rewrites the wrong instruction.
    @discardableResult
    func patchTaskConversionEvalInternal() -> Bool {
        log("\n[JB] task_conversion_eval_internal: cmp xzr,xzr")
        guard let codeRange = codeRanges.first else { return false }
        let (ks, ke) = (codeRange.start, codeRange.end)
        let candidates = collectTaskConversionCandidates(start: ks, end: ke)
        guard candidates.count == 1 else {
            log(" [-] expected 1 task-conversion guard site, found \(candidates.count)")
            return false
        }
        let site = candidates[0]
        let va = fileOffsetToVA(site)
        emit(site, ARM64.cmpXzrXzr,
             patchID: "task_conversion_eval",
             virtualAddress: va,
             description: "cmp xzr,xzr [_task_conversion_eval_internal]")
        return true
    }

    // MARK: - Private scanner

    /// Raw-word scan for the unique guard site in
    /// `_task_conversion_eval_internal` (see the shape in the file header).
    /// Returns the file offsets of every `CMP Xn, X0` whose full context
    /// matches; the caller demands exactly one.
    private func collectTaskConversionCandidates(start: Int, end: Int) -> [Int] {
        // Derived masks no hardcoded opcode bytes:
        // CMP Xn, X0 = SUBS XZR, Xn, X0 bits [31:21]=1110_1011_000, [20:16]=X0=00000,
        // [15:10]=000000, [9:5]=Rn, [4:0]=11111(XZR)
        // Mask pins the fixed opcode, the Rm operand (bits 20:16), the shift
        // amount, and the XZR destination; only Rn is left free.
        // FIX: the previous mask 0xFFE0_FC1F omitted bits [20:16] (Rm), so
        // the X0 pattern matched CMP Xn, Xm for ANY Xm, and the X1 pattern
        // below (whose value has bit 16 set) could never match at all the
        // scanner unconditionally returned zero candidates.
        let cmpXnX0Mask: UInt32 = 0xFFFF_FC1F
        let cmpXnX0Val: UInt32 = 0xEB00_001F // cmp Xn, X0 Rn wildcard
        // CMP Xn, X1 = SUBS XZR, Xn, X1 Rm=X1=00001
        let cmpXnX1Mask: UInt32 = 0xFFFF_FC1F
        let cmpXnX1Val: UInt32 = 0xEB01_001F // cmp Xn, X1 Rn wildcard
        // B.EQ #offset bits[31:24]=0101_0100, bit[4]=0, bits[3:0]=0000 (EQ cond)
        let beqMask: UInt32 = 0xFF00_001F
        let beqVal: UInt32 = 0x5400_0000 // b.eq with any imm19
        // LDR Xd, [Xn] (unsigned offset, size=3):
        // bits [31:22] fixed = 0xF94 (size=11, V=0, opc=01, class=01);
        // bits [21:10] = imm12, bits [9:5] = Rn, bits [4:0] = Rt all variable.
        let ldrXUnsignedMask: UInt32 = 0xFFC0_0000 // leaves imm12, Rn, Rt free
        let ldrXUnsignedVal: UInt32 = 0xF940_0000
        // ADRP: bit[31]=1, bits[28:24]=10000
        let adrpMask: UInt32 = 0x9F00_0000
        let adrpVal: UInt32 = 0x9000_0000
        // MOV X19, X0 = ORR X19, XZR, X0
        let movX19X0: UInt32 = 0xAA00_03F3
        // MOV X0, X1 = ORR X0, XZR, X1
        let movX0X1: UInt32 = 0xAA01_03E0
        // BL mask
        let blMask: UInt32 = 0xFC00_0000
        let blVal: UInt32 = 0x9400_0000
        // CBZ/CBNZ W (32-bit): bits[31]=0, bits[30:25]=011010 / 011011
        let cbzWMask: UInt32 = 0x7F00_0000
        let cbzWVal: UInt32 = 0x3400_0000 // CBZ W
        let cbnzWVal: UInt32 = 0x3500_0000 // CBNZ W
        var out: [Int] = []
        // Start 8 bytes in so the [off - 8] / [off - 4] context reads below
        // stay inside the range.
        var off = start + 8
        while off < end - 28 {
            defer { off += 4 }
            // [off]: CMP Xn, X0
            let i0 = buffer.readU32(at: off)
            guard (i0 & cmpXnX0Mask) == cmpXnX0Val else { continue }
            let cmpRn = (i0 >> 5) & 0x1F // the register being compared
            // [off - 4]: LDR Xn, [Xn] (load into cmpRn from cmpRn)
            let prev = buffer.readU32(at: off - 4)
            guard (prev & ldrXUnsignedMask) == ldrXUnsignedVal else { continue }
            let pRt = prev & 0x1F
            let pRn = (prev >> 5) & 0x1F
            guard pRt == cmpRn, pRn == cmpRn else { continue }
            // [off + 4]: B.EQ
            let i1 = buffer.readU32(at: off + 4)
            guard (i1 & beqMask) == beqVal else { continue }
            // [off + 8]: CMP Xn, X1 (same register)
            let i2 = buffer.readU32(at: off + 8)
            guard (i2 & cmpXnX1Mask) == cmpXnX1Val else { continue }
            guard ((i2 >> 5) & 0x1F) == cmpRn else { continue }
            // [off + 12]: B.EQ
            let i3 = buffer.readU32(at: off + 12)
            guard (i3 & beqMask) == beqVal else { continue }
            // Context safety: ADRP at [off - 8] for same register
            let p2 = buffer.readU32(at: off - 8)
            guard (p2 & adrpMask) == adrpVal else { continue }
            guard (p2 & 0x1F) == cmpRn else { continue }
            // [off + 16]: MOV X19, X0
            guard buffer.readU32(at: off + 16) == movX19X0 else { continue }
            // [off + 20]: MOV X0, X1
            guard buffer.readU32(at: off + 20) == movX0X1 else { continue }
            // [off + 24]: BL
            let i6 = buffer.readU32(at: off + 24)
            guard (i6 & blMask) == blVal else { continue }
            // [off + 28]: CBZ or CBNZ W0
            let i7 = buffer.readU32(at: off + 28)
            let op7 = i7 & cbzWMask
            guard op7 == cbzWVal || op7 == cbnzWVal else { continue }
            guard (i7 & 0x1F) == 0 else { continue } // must be W0
            // B.EQ targets must be forward and nearby (within same function)
            guard let t1 = decodeBEQTarget(insn: i1, at: off + 4) else { continue }
            guard let t2 = decodeBEQTarget(insn: i3, at: off + 12) else { continue }
            guard t1 > off, t2 > off else { continue }
            guard (t1 - off) <= 0x200, (t2 - off) <= 0x200 else { continue }
            out.append(off)
        }
        return out
    }

    /// Decode a B.EQ target file offset from an instruction at `pc`.
    /// Returns nil when `insn` is not a B.EQ.
    private func decodeBEQTarget(insn: UInt32, at pc: Int) -> Int? {
        // B.cond: bits[31:24] = 0x54, bits[23:5] = imm19, bits[4] = 0, bits[3:0] = cond
        // Cond is fully masked (0x1F) so only EQ matches, as the name says;
        // the previous 0x1E mask also admitted B.NE.
        guard (insn & 0xFF00_001F) == 0x5400_0000 else { return nil }
        let imm19 = (insn >> 5) & 0x7FFFF
        // Sign-extend 19 bits
        let signedImm = Int32(bitPattern: imm19 << 13) >> 13
        return pc + Int(signedImm) * 4
    }
}

View File

@@ -0,0 +1,156 @@
// KernelJBPatchTaskForPid.swift JB kernel patch: task_for_pid bypass
//
// Python source: scripts/patchers/kernel_jb_patch_task_for_pid.py
import Capstone
import Foundation
extension KernelJBPatcher {
    /// NOP the upstream early `pid == 0` reject gate in `task_for_pid`.
    ///
    /// Anchor: `proc_ro_ref_task` string enclosing function.
    /// Shape:
    /// ldr wPid, [xArgs, #8]
    /// ldr xTaskPtr, [xArgs, #0x10]
    /// ...
    /// cbz wPid, fail
    /// mov w1, #0
    /// mov w2, #0
    /// mov w3, #0
    /// mov x4, #0
    /// bl port_name_to_task-like helper
    /// cbz x0, fail (same fail target)
    ///
    /// Requires exactly one matching gate in the first 0x800 bytes of the
    /// anchor function; zero or multiple candidates abort without patching.
    @discardableResult
    func patchTaskForPid() -> Bool {
        log("\n[JB] _task_for_pid: upstream pid==0 gate NOP")
        // Resolve the enclosing function via the anchor string's first ref.
        guard let strOff = buffer.findString("proc_ro_ref_task") else {
            log(" [-] task_for_pid anchor function not found")
            return false
        }
        let refs = findStringRefs(strOff)
        guard !refs.isEmpty, let funcStart = findFunctionStart(refs[0].adrpOff) else {
            log(" [-] task_for_pid anchor function not found")
            return false
        }
        // The gate sits near the top of the function; 0x800 bytes suffice.
        let searchEnd = min(buffer.count, funcStart + 0x800)
        var hits: [Int] = []
        var off = funcStart
        while off + 0x18 < searchEnd {
            // Only a CBZ can start the gate pattern; cheap pre-filter before
            // the full 7-instruction match below.
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 1)
            guard let first = insns.first, first.mnemonic == "cbz" else { off += 4; continue }
            if let site = matchUpstreamTaskForPidGate(at: off, funcStart: funcStart) {
                hits.append(site)
            }
            off += 4
        }
        guard hits.count == 1 else {
            log(" [-] expected 1 upstream task_for_pid candidate, found \(hits.count)")
            return false
        }
        let patchOff = hits[0]
        let va = fileOffsetToVA(patchOff)
        emit(patchOff, ARM64.nop,
             patchID: "kernelcache_jb.task_for_pid",
             virtualAddress: va,
             description: "NOP [_task_for_pid pid==0 gate]")
        return true
    }

    // MARK: - Private helpers

    /// Match the 7-instruction gate starting at `off` (a CBZ). Returns `off`
    /// when the shape (cbz / 4x `mov #0` / bl / cbz x0 with the same fail
    /// target) holds AND the CBZ register was loaded from [args, #8] within
    /// the preceding 0x18 bytes; nil otherwise.
    private func matchUpstreamTaskForPidGate(at off: Int, funcStart: Int) -> Int? {
        let insns = disasm.disassemble(in: buffer.data, at: off, count: 7)
        guard insns.count >= 7 else { return nil }
        let cbzPid = insns[0], mov1 = insns[1], mov2 = insns[2]
        let mov3 = insns[3], mov4 = insns[4], blInsn = insns[5], cbzRet = insns[6]
        // cbz wPid, fail
        guard cbzPid.mnemonic == "cbz",
              let cbzPidOps = cbzPid.aarch64?.operands, cbzPidOps.count == 2,
              cbzPidOps[0].type == AARCH64_OP_REG,
              cbzPidOps[1].type == AARCH64_OP_IMM
        else { return nil }
        let failTarget = cbzPidOps[1].imm
        // mov w1, #0 / mov w2, #0 / mov w3, #0 / mov x4, #0
        guard isMovImmZero(mov1, dstName: "w1"),
              isMovImmZero(mov2, dstName: "w2"),
              isMovImmZero(mov3, dstName: "w3"),
              isMovImmZero(mov4, dstName: "x4")
        else { return nil }
        // bl helper
        guard blInsn.mnemonic == "bl" else { return nil }
        // cbz x0, fail (same target)
        guard cbzRet.mnemonic == "cbz",
              let cbzRetOps = cbzRet.aarch64?.operands, cbzRetOps.count == 2,
              cbzRetOps[0].type == AARCH64_OP_REG,
              cbzRetOps[1].type == AARCH64_OP_IMM,
              cbzRetOps[1].imm == failTarget
        else { return nil }
        // x0 — width is checked via the textual operand because the pattern
        // specifically needs the 64-bit x0, not w0.
        let retRegName = cbzRet.operandString.components(separatedBy: ",").first?
            .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
        guard retRegName == "x0" else { return nil }
        // Look backward for ldr wPid, [x?, #8] and ldr xTaskPtr, [x?, #0x10]
        // within a 6-instruction window before the gate.
        let scanStart = max(funcStart, off - 0x18)
        var pidLoad: Instruction? = nil
        var taskptrLoad: Instruction? = nil
        var prevOff = scanStart
        while prevOff < off {
            let prevInsns = disasm.disassemble(in: buffer.data, at: prevOff, count: 1)
            guard let prev = prevInsns.first else { prevOff += 4; continue }
            if pidLoad == nil, isWLdrFromXImm(prev, imm: 8) { pidLoad = prev }
            if taskptrLoad == nil, isXLdrFromXImm(prev, imm: 0x10) { taskptrLoad = prev }
            prevOff += 4
        }
        guard let pid = pidLoad, taskptrLoad != nil else { return nil }
        // pid register must match cbz operand
        guard let pidOps = pid.aarch64?.operands, !pidOps.isEmpty,
              pidOps[0].reg == cbzPidOps[0].reg
        else { return nil }
        return off
    }

    /// True when `insn` is `mov <dstName>, #0`; the destination is compared
    /// by name so the W/X width is part of the match.
    private func isMovImmZero(_ insn: Instruction, dstName: String) -> Bool {
        guard insn.mnemonic == "mov",
              let ops = insn.aarch64?.operands, ops.count == 2,
              ops[0].type == AARCH64_OP_REG,
              ops[1].type == AARCH64_OP_IMM, ops[1].imm == 0
        else { return false }
        let name = insn.operandString.components(separatedBy: ",").first?
            .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
        return name == dstName
    }

    /// True when `insn` is an `ldr` with a 32-bit (w) destination loading
    /// from a memory operand whose displacement equals `imm`.
    private func isWLdrFromXImm(_ insn: Instruction, imm: Int32) -> Bool {
        guard insn.mnemonic == "ldr",
              let ops = insn.aarch64?.operands, ops.count >= 2,
              ops[0].type == AARCH64_OP_REG,
              ops[1].type == AARCH64_OP_MEM,
              ops[1].mem.disp == imm
        else { return false }
        let dstName = insn.operandString.components(separatedBy: ",").first?
            .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
        return dstName.hasPrefix("w")
    }

    /// True when `insn` is an `ldr` with a 64-bit (x) destination loading
    /// from a memory operand whose displacement equals `imm`.
    private func isXLdrFromXImm(_ insn: Instruction, imm: Int32) -> Bool {
        guard insn.mnemonic == "ldr",
              let ops = insn.aarch64?.operands, ops.count >= 2,
              ops[0].type == AARCH64_OP_REG,
              ops[1].type == AARCH64_OP_MEM,
              ops[1].mem.disp == imm
        else { return false }
        let dstName = insn.operandString.components(separatedBy: ",").first?
            .trimmingCharacters(in: CharacterSet.whitespaces) ?? ""
        return dstName.hasPrefix("x")
    }
}

View File

@@ -0,0 +1,60 @@
// KernelJBPatchThidCrash.swift JB kernel patch: thid_should_crash bypass
//
// Python source: scripts/patchers/kernel_jb_patch_thid_crash.py
import Foundation
extension KernelJBPatcher {
    /// Zero out `_thid_should_crash` by walking the sysctl metadata that
    /// sits next to the `thid_should_crash` name string.
    ///
    /// The raw PCC 26.1 kernels do not provide a usable runtime symbol
    /// table, so resolution always goes through the sysctl name string and
    /// the adjacent `sysctl_oid` record.
    @discardableResult
    func patchThidShouldCrash() -> Bool {
        log("\n[JB] _thid_should_crash: zero out")
        guard let nameOff = buffer.findString("thid_should_crash") else {
            log(" [-] string not found")
            return false
        }
        log(" [*] string at foff 0x\(String(format: "%X", nameOff))")
        // Build the DATA segment spans once; __DATA_CONST is tracked
        // separately since that is where sysctl variables normally live.
        var constSpans: [(Int, Int)] = []
        var dataSpans: [(Int, Int)] = []
        for seg in segments where seg.fileSize > 0 {
            let span = (Int(seg.fileOffset), Int(seg.fileOffset + seg.fileSize))
            if seg.name == "__DATA_CONST" { constSpans.append(span) }
            if seg.name.contains("DATA") { dataSpans.append(span) }
        }
        // Probe up to 128 bytes past the string for a sysctl_oid pointer
        // whose low 32 bits point at the backing variable.
        for delta in stride(from: 0, through: 128, by: 8) {
            let probe = nameOff + delta
            guard probe + 8 <= buffer.count else { break }
            let word = buffer.readU64(at: probe)
            guard word != 0 else { continue }
            let low32 = Int(word & 0xFFFF_FFFF)
            guard low32 > 0, low32 < buffer.count else { continue }
            let current = buffer.readU32(at: low32)
            guard current >= 1, current <= 255 else { continue }
            // The candidate must land inside a DATA segment.
            let inConst = constSpans.contains { low32 >= $0.0 && low32 < $0.1 }
            let inAnyData = inConst || dataSpans.contains { low32 >= $0.0 && low32 < $0.1 }
            guard inAnyData else { continue }
            log(" [+] variable at foff 0x\(String(format: "%X", low32)) (value=\(current), found via sysctl_oid at str+0x\(String(format: "%X", delta)))")
            emit(low32, Data([0, 0, 0, 0]),
                 patchID: "kernelcache_jb.thid_should_crash",
                 virtualAddress: fileOffsetToVA(low32),
                 description: "zero [_thid_should_crash]")
            return true
        }
        log(" [-] variable not found")
        return false
    }
}

View File

@@ -0,0 +1,135 @@
// KernelJBPatchVmFault.swift JB kernel patch: VM fault enter prepare bypass
//
// Python source: scripts/patchers/kernel_jb_patch_vm_fault.py
import Capstone
import Foundation
extension KernelJBPatcher {
    /// Force the upstream cs_bypass fast-path in `_vm_fault_enter_prepare`.
    ///
    /// Expected semantic shape:
    /// ... early in prologue: LDR Wflags, [fault_info_reg, #0x28]
    /// ... later: TBZ Wflags, #3, validation_path
    /// MOV Wtainted, #0
    /// B post_validation_success
    ///
    /// NOPing the TBZ forces the fast-path unconditionally.
    ///
    /// Candidate functions are collected via two strategies (symbol table,
    /// then string anchor); the patch is applied only when all candidates
    /// agree on a single gate site.
    @discardableResult
    func patchVmFaultEnterPrepare() -> Bool {
        log("\n[JB] _vm_fault_enter_prepare: NOP")
        var candidateFuncs: [Int] = []
        // Strategy 1: symbol table lookup
        if let funcOff = resolveSymbol("_vm_fault_enter_prepare") {
            candidateFuncs.append(funcOff)
        }
        // Strategy 2: string anchor
        if let strOff = buffer.findString("vm_fault_enter_prepare") {
            let refs = findStringRefs(strOff)
            for (adrpOff, _) in refs {
                if let funcOff = findFunctionStart(adrpOff) {
                    candidateFuncs.append(funcOff)
                }
            }
        }
        // Deduplicate function starts, then reduce each to at most one gate.
        var candidateSites = Set<Int>()
        for funcStart in Set(candidateFuncs) {
            let funcEnd = findFuncEnd(funcStart, maxSize: 0x4000)
            if let site = findCsBypassGate(start: funcStart, end: funcEnd) {
                candidateSites.insert(site)
            }
        }
        if candidateSites.count == 1 {
            let patchOff = candidateSites.first!
            let va = fileOffsetToVA(patchOff)
            emit(patchOff, ARM64.nop,
                 patchID: "kernelcache_jb.vm_fault_enter_prepare",
                 virtualAddress: va,
                 description: "NOP [_vm_fault_enter_prepare]")
            return true
        } else if candidateSites.count > 1 {
            // Ambiguity is a hard failure: never guess between sites.
            let list = candidateSites.sorted().map { String(format: "0x%X", $0) }.joined(separator: ", ")
            log(" [-] ambiguous vm_fault_enter_prepare candidates: \(list)")
            return false
        }
        log(" [-] patch site not found")
        return false
    }

    // MARK: - Private helpers

    /// Find the unique `tbz Wflags, #3 / mov Wt, #0 / b ...` gate inside
    /// a function, where Wflags is the register loaded from [base, #0x28]
    /// in the function prologue.
    ///
    /// Returns the TBZ's file offset, or nil when zero or multiple gates
    /// match (ambiguity is treated as not found).
    private func findCsBypassGate(start: Int, end: Int) -> Int? {
        // Pass 1: collect registers loaded from [*, #0x28] in first 0x120 bytes
        var flagRegs = Set<UInt32>()
        let prologueEnd = min(end, start + 0x120)
        var off = start
        while off < prologueEnd {
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 1)
            guard let insn = insns.first else { off += 4; continue }
            if insn.mnemonic == "ldr",
               let ops = insn.aarch64?.operands, ops.count >= 2,
               ops[0].type == AARCH64_OP_REG,
               ops[1].type == AARCH64_OP_MEM,
               ops[1].mem.base != AARCH64_REG_INVALID,
               ops[1].mem.disp == 0x28
            {
                // Only 32-bit (w) destinations qualify as flag registers.
                let dstName = insn.operandString.components(separatedBy: ",").first?.trimmingCharacters(in: .whitespaces) ?? ""
                if dstName.hasPrefix("w") {
                    flagRegs.insert(ops[0].reg.rawValue)
                }
            }
            off += 4
        }
        guard !flagRegs.isEmpty else { return nil }
        // Pass 2: scan for TBZ Wflags, #3, target / MOV Wt, #0 / B target2
        var hits: [Int] = []
        // Skip the prologue itself; the gate lives past the first 0x80 bytes.
        let scanStart = max(start + 0x80, start)
        off = scanStart
        while off + 8 < end {
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 1)
            guard let gate = insns.first else { off += 4; continue }
            guard gate.mnemonic == "tbz",
                  let gateOps = gate.aarch64?.operands, gateOps.count == 3,
                  gateOps[0].type == AARCH64_OP_REG,
                  flagRegs.contains(gateOps[0].reg.rawValue),
                  gateOps[1].type == AARCH64_OP_IMM, gateOps[1].imm == 3,
                  gateOps[2].type == AARCH64_OP_IMM
            else { off += 4; continue }
            // Check mov Wt, #0
            let movInsns = disasm.disassemble(in: buffer.data, at: off + 4, count: 1)
            guard let movInsn = movInsns.first,
                  movInsn.mnemonic == "mov",
                  let movOps = movInsn.aarch64?.operands, movOps.count == 2,
                  movOps[0].type == AARCH64_OP_REG,
                  movOps[1].type == AARCH64_OP_IMM, movOps[1].imm == 0
            else { off += 4; continue }
            let movDstName = movInsn.operandString.components(separatedBy: ",").first?.trimmingCharacters(in: .whitespaces) ?? ""
            guard movDstName.hasPrefix("w") else { off += 4; continue }
            // Check unconditional B
            let bInsns = disasm.disassemble(in: buffer.data, at: off + 8, count: 1)
            guard let bInsn = bInsns.first,
                  bInsn.mnemonic == "b",
                  let bOps = bInsn.aarch64?.operands, bOps.count == 1,
                  bOps[0].type == AARCH64_OP_IMM
            else { off += 4; continue }
            hits.append(off)
            off += 4
        }
        return hits.count == 1 ? hits[0] : nil
    }
}

View File

@@ -0,0 +1,141 @@
// KernelJBPatchVmProtect.swift JB kernel patch: VM map protect bypass
//
// Python source: scripts/patchers/kernel_jb_patch_vm_protect.py
import Capstone
import Foundation
extension KernelJBPatcher {
    /// Skip the vm_map_protect write-downgrade gate.
    ///
    /// Source-backed anchor: recover the function from the in-kernel
    /// `vm_map_protect(` panic string, then find the unique local block matching:
    ///
    /// mov wMask, #6
    /// bics wzr, wMask, wProt
    /// b.ne skip
    /// tbnz wEntryFlags, #22, skip
    /// ...
    /// and wProt, wProt, #~VM_PROT_WRITE
    ///
    /// Rewriting `b.ne` to unconditional `b` always skips the downgrade block.
    @discardableResult
    func patchVmMapProtect() -> Bool {
        log("\n[JB] _vm_map_protect: skip write-downgrade gate")
        // Find function via "vm_map_protect(" string
        guard let strOff = buffer.findString("vm_map_protect(") else {
            log(" [-] kernel-text 'vm_map_protect(' anchor not found")
            return false
        }
        let refs = findStringRefs(strOff)
        guard !refs.isEmpty, let funcStart = findFunctionStart(refs[0].adrpOff) else {
            log(" [-] kernel-text 'vm_map_protect(' anchor not found")
            return false
        }
        let funcEnd = findFuncEnd(funcStart, maxSize: 0x2000)
        guard let gate = findWriteDowngradeGate(start: funcStart, end: funcEnd) else {
            log(" [-] vm_map_protect write-downgrade gate not found")
            return false
        }
        let (brOff, target) = gate
        // Rewrite the conditional branch as an unconditional B to the same
        // target; bail out if the displacement does not fit a B.
        guard let bBytes = encodeB(from: brOff, to: target) else {
            log(" [-] branch rewrite out of range")
            return false
        }
        let va = fileOffsetToVA(brOff)
        let delta = target - brOff
        emit(brOff, bBytes,
             patchID: "kernelcache_jb.vm_map_protect",
             virtualAddress: va,
             description: "b #0x\(String(format: "%X", delta)) [_vm_map_protect]")
        return true
    }

    // MARK: - Private helpers

    /// Find the `b.ne` instruction address and its target in the write-downgrade block.
    /// Returns nil when zero or multiple gates match (ambiguity = not found).
    private func findWriteDowngradeGate(start: Int, end: Int) -> (brOff: Int, target: Int)? {
        let wZrReg: aarch64_reg = AARCH64_REG_WZR
        var hits: [(Int, Int)] = []
        var off = start
        while off + 0x10 < end {
            // Decode the 4-instruction window mov/bics/b.ne/tbnz at once.
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 4)
            guard insns.count >= 4 else { off += 4; continue }
            let movMask = insns[0], bicsInsn = insns[1], bneInsn = insns[2], tbnzInsn = insns[3]
            // mov wMask, #6 (VM_PROT_WRITE | VM_PROT_EXECUTE)
            guard movMask.mnemonic == "mov",
                  let movOps = movMask.aarch64?.operands, movOps.count == 2,
                  movOps[0].type == AARCH64_OP_REG,
                  movOps[1].type == AARCH64_OP_IMM, movOps[1].imm == 6
            else { off += 4; continue }
            let maskReg = movOps[0].reg
            // bics wzr, wMask, wProt
            guard bicsInsn.mnemonic == "bics",
                  let bicsOps = bicsInsn.aarch64?.operands, bicsOps.count == 3,
                  bicsOps[0].type == AARCH64_OP_REG, bicsOps[0].reg == wZrReg,
                  bicsOps[1].type == AARCH64_OP_REG, bicsOps[1].reg == maskReg,
                  bicsOps[2].type == AARCH64_OP_REG
            else { off += 4; continue }
            let protReg = bicsOps[2].reg
            // b.ne <skip> — must branch forward
            guard bneInsn.mnemonic == "b.ne",
                  let bneOps = bneInsn.aarch64?.operands, bneOps.count == 1,
                  bneOps[0].type == AARCH64_OP_IMM
            else { off += 4; continue }
            let skipTarget = Int(bneOps[0].imm)
            guard skipTarget > Int(bneInsn.address) else { off += 4; continue }
            // tbnz wEntryFlags, #22, <skip> — must share the same skip target
            guard tbnzInsn.mnemonic == "tbnz",
                  let tbnzOps = tbnzInsn.aarch64?.operands, tbnzOps.count == 3,
                  tbnzOps[0].type == AARCH64_OP_REG,
                  tbnzOps[1].type == AARCH64_OP_IMM, tbnzOps[1].imm == 22,
                  tbnzOps[2].type == AARCH64_OP_IMM, Int(tbnzOps[2].imm) == skipTarget
            else { off += 4; continue }
            // Verify there's an `and wProt, wProt, #~2` between tbnz+4 and target
            let searchStart = Int(tbnzInsn.address) + 4
            let searchEnd = min(skipTarget, end)
            guard findWriteClearBetween(start: searchStart, end: searchEnd, protReg: protReg) != nil
            else { off += 4; continue }
            // bneInsn.address is a virtual-like address (== file offset here)
            let bneFileOff = Int(bneInsn.address)
            hits.append((bneFileOff, skipTarget))
            off += 4
        }
        return hits.count == 1 ? hits[0] : nil
    }

    /// Scan [start, end) for `and wProt, wProt, #imm` where imm clears bit 1 (VM_PROT_WRITE).
    /// Returns the file offset of the first match, or nil.
    private func findWriteClearBetween(start: Int, end: Int, protReg: aarch64_reg) -> Int? {
        var off = start
        while off < end {
            let insns = disasm.disassemble(in: buffer.data, at: off, count: 1)
            guard let insn = insns.first else { off += 4; continue }
            if insn.mnemonic == "and",
               let ops = insn.aarch64?.operands, ops.count == 3,
               ops[0].type == AARCH64_OP_REG, ops[0].reg == protReg,
               ops[1].type == AARCH64_OP_REG, ops[1].reg == protReg,
               ops[2].type == AARCH64_OP_IMM
            {
                let imm = UInt32(bitPattern: Int32(truncatingIfNeeded: ops[2].imm)) & 0xFFFF_FFFF
                // NOTE(review): the header documents `and wProt, wProt,
                // #~VM_PROT_WRITE`, whose low 3 bits are 0b101 (0x5: keep
                // read+execute, clear write). The check below accepts 0b011
                // (0x3: keep read+write, clear execute) instead — one of the
                // two is inconsistent. Confirm against the Python source
                // (scripts/patchers/kernel_jb_patch_vm_protect.py) and a
                // real kernel before changing either side.
                if (imm & 0x7) == 0x3 {
                    return off
                }
            }
            off += 4
        }
        return nil
    }
}

View File

@@ -0,0 +1,62 @@
// KernelJBPatcher.swift JB kernel patcher orchestrator.
//
// Python source: scripts/patchers/kernel_jb.py
import Foundation
/// JB kernel patcher: 84 patches across 3 groups.
///
/// Group A: Core gate-bypass methods (5 patches)
/// Group B: Pattern/string anchored methods (16 patches)
/// Group C: Shellcode/trampoline heavy methods (4 patches)
///
/// NOTE(review): the group counts above refer to emitted patch records,
/// not method calls — Group A below makes 4 calls though the header says
/// "5 patches". Confirm against scripts/patchers/kernel_jb.py.
public final class KernelJBPatcher: KernelJBPatcherBase, Patcher {
    // Component name used to namespace the emitted patch IDs.
    public let component = "kernelcache_jb"

    /// Run every patch finder in a fixed order and return the accumulated
    /// patch records without writing them to the buffer.
    ///
    /// The call order below is deliberate; individual patchers log and
    /// return false on failure rather than throwing, so a single miss does
    /// not stop the remaining groups.
    public func findAll() throws -> [PatchRecord] {
        // Infrastructure passes: Mach-O layout, instruction indexes,
        // symbols, and the _panic anchor used by several finders.
        try parseMachO()
        buildADRPIndex()
        buildBLIndex()
        buildSymbolTable()
        findPanic()
        // Group A
        patchAmfiCdhashInTrustcache()
        patchTaskConversionEvalInternal()
        patchSandboxHooksExtended()
        patchIoucFailedMacf()
        // Group B
        patchPostValidationAdditional()
        patchProcSecurityPolicy()
        patchProcPidinfo()
        patchConvertPortToMap()
        patchBsdInitAuth()
        patchDounmount()
        patchIoSecureBsdRoot()
        patchLoadDylinker()
        patchMacMount()
        patchNvramVerifyPermission()
        patchSharedRegionMap()
        patchSpawnValidatePersona()
        patchTaskForPid()
        patchThidShouldCrash()
        patchVmFaultEnterPrepare()
        patchVmMapProtect()
        // Group C
        patchCredLabelUpdateExecve()
        patchHookCredLabelUpdateExecve()
        patchKcall10()
        patchSyscallmaskApplyToProc()
        return patches
    }

    /// Find all patches and write them into the mutable buffer.
    /// Returns the number of records applied.
    ///
    /// NOTE(review): this re-runs `findAll()`; if `findAll()` was already
    /// invoked on this instance, `patches` may accumulate duplicates —
    /// confirm how `emit` populates `patches` in the base class.
    public func apply() throws -> Int {
        let patches = try findAll()
        for record in patches {
            buffer.writeBytes(at: record.fileOffset, bytes: record.patchedBytes)
        }
        return patches.count
    }
}

View File

@@ -0,0 +1,440 @@
// KernelJBPatcherBase.swift JB kernel patcher base with extended infrastructure.
//
// Python source: scripts/patchers/kernel_jb_base.py
import Capstone
import Foundation
/// Base class for JB kernel patching, extending KernelPatcherBase with:
/// - Symbol table parsing (nlist64 from LC_SYMTAB + fileset entries)
/// - Code cave finder (zeros/0xFF/UDF in executable segments)
/// - Branch encoding helpers (encodeB, encodeBL)
/// - Function boundary finders (findFuncEnd, findBLToPanicInRange)
/// - String-anchored function finders
/// - proc_info anchor cache
public class KernelJBPatcherBase: KernelPatcherBase {
/// Symbol name file offset map, built from nlist64 entries.
var symbols: [String: Int] = [:]
/// Cached proc_info anchor (func_start, switch_off).
private var procInfoAnchor: (Int, Int)?
// Set alongside the anchor cache — presumably prevents re-scanning after a
// failed lookup; confirm in the anchor accessor (not visible here).
private var procInfoAnchorScanned = false
/// JB scan cache for expensive searches.
var jbScanCache: [String: Int] = [:]
// MARK: - Symbol Table
/// Build symbol table from LC_SYMTAB in the main Mach-O header AND all
/// LC_FILESET_ENTRY sub-Mach-Os.
///
/// Reads from `buffer.original` (Python: `self.raw`) so the table reflects
/// unpatched data. Populates `self.symbols` with name file offset.
///
/// NOTE(review): `load(fromByteOffset:as:)` requires the offset to be
/// suitably aligned for the loaded type; load commands in a 64-bit Mach-O
/// are 8-byte aligned in practice, but a crafted/truncated input could
/// violate that — confirm whether hardening (loadUnaligned) is wanted.
func buildSymbolTable() {
    symbols = [:]
    let raw = buffer.original
    guard raw.count > 32 else { return }
    // ncmds sits at +16 in the 64-bit mach_header; the magic is assumed to
    // have been validated by parseMachO (not re-checked here).
    let ncmds: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: 16, as: UInt32.self) }
    // Pass 1: top-level LC_SYMTAB
    var tempOff = 32
    for _ in 0 ..< ncmds {
        guard tempOff + 8 <= raw.count else { break }
        let cmd: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: tempOff, as: UInt32.self) }
        let cmdsize: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: tempOff + 4, as: UInt32.self) }
        // A cmdsize below the header size would make the walk loop forever.
        guard cmdsize >= 8 else { break }
        if cmd == 0x2, tempOff + 20 <= raw.count { // LC_SYMTAB
            let symoff: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: tempOff + 8, as: UInt32.self) }
            let nsyms: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: tempOff + 12, as: UInt32.self) }
            let stroff: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: tempOff + 16, as: UInt32.self) }
            parseNlist(symoff: Int(symoff), nsyms: Int(nsyms), stroff: Int(stroff))
        }
        tempOff += Int(cmdsize)
    }
    // Pass 2: LC_FILESET_ENTRY sub-Mach-Os
    tempOff = 32
    for _ in 0 ..< ncmds {
        guard tempOff + 8 <= raw.count else { break }
        let cmd: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: tempOff, as: UInt32.self) }
        let cmdsize: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: tempOff + 4, as: UInt32.self) }
        guard cmdsize >= 8 else { break }
        if cmd == 0x8000_0035, tempOff + 24 <= raw.count { // LC_FILESET_ENTRY: fileoff at +16 (u64)
            let foffEntry: UInt64 = raw.withUnsafeBytes { $0.load(fromByteOffset: tempOff + 16, as: UInt64.self) }
            parseFilesetSymtab(mhOff: Int(foffEntry))
        }
        tempOff += Int(cmdsize)
    }
    if verbose { print("[*] Symbol table: \(symbols.count) symbols resolved") }
}
/// Parse LC_SYMTAB from a fileset entry Mach-O whose header starts at `mhOff`.
/// Reads from `buffer.original`. Mirrors Python `_parse_fileset_symtab(mh_off)`.
///
/// Silently returns on a bad offset or a header that is not MH_MAGIC_64;
/// a fileset entry that cannot be parsed simply contributes no symbols.
private func parseFilesetSymtab(mhOff: Int) {
    let raw = buffer.original
    guard mhOff >= 0, mhOff + 32 <= raw.count else { return }
    // 0xFEEDFACF = MH_MAGIC_64: only little-endian 64-bit sub-images.
    let magic: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: mhOff, as: UInt32.self) }
    guard magic == 0xFEED_FACF else { return }
    let ncmds: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: mhOff + 16, as: UInt32.self) }
    var off = mhOff + 32
    for _ in 0 ..< ncmds {
        guard off + 8 <= raw.count else { break }
        let cmd: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt32.self) }
        let cmdsize: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off + 4, as: UInt32.self) }
        guard cmdsize >= 8 else { break }
        if cmd == 0x2, off + 20 <= raw.count { // LC_SYMTAB
            // symoff/stroff are file-absolute, so parseNlist needs no
            // mhOff rebasing here.
            let symoff: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off + 8, as: UInt32.self) }
            let nsyms: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off + 12, as: UInt32.self) }
            let stroff: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off + 16, as: UInt32.self) }
            parseNlist(symoff: Int(symoff), nsyms: Int(nsyms), stroff: Int(stroff))
        }
        off += Int(cmdsize)
    }
}
/// Parse nlist64 entries: add defined function symbols (n_type & 0x0E == 0x0E) to `symbols`.
/// Reads from `buffer.original` (Python: `self.raw`). Mirrors Python `_parse_nlist`.
///
/// Entries are keyed by name; a later duplicate overwrites an earlier one.
private func parseNlist(symoff: Int, nsyms: Int, stroff: Int) {
    let raw = buffer.original
    let size = raw.count
    for i in 0 ..< nsyms {
        let entryOff = symoff + i * 16 // sizeof(nlist_64) == 16
        guard entryOff + 16 <= size else { break }
        // nlist_64: n_strx(u32) n_type(u8) n_sect(u8) n_desc(u16) n_value(u64)
        let nStrx: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: entryOff, as: UInt32.self) }
        let nType: UInt8 = raw.withUnsafeBytes { $0.load(fromByteOffset: entryOff + 4, as: UInt8.self) }
        let nValue: UInt64 = raw.withUnsafeBytes { $0.load(fromByteOffset: entryOff + 8, as: UInt64.self) }
        // 0x0E is the N_TYPE field mask; requiring == 0x0E selects N_SECT
        // (symbol defined in a section). N_EXT (0x01) is NOT required, so
        // both external and local section symbols pass.
        guard nType & 0x0E == 0x0E, nValue != 0 else { continue }
        let nameOff = stroff + Int(nStrx)
        guard nameOff < size else { continue }
        // Scan for the NUL terminator, capping names at 512 bytes; a name
        // with no NUL in range is truncated, not rejected.
        var nameEnd = nameOff
        while nameEnd < size, nameEnd - nameOff < 512 {
            if raw[nameEnd] == 0 { break }
            nameEnd += 1
        }
        guard nameEnd > nameOff else { continue }
        // NOTE(review): integer subscripting/slicing of `raw` assumes its
        // startIndex is 0 (a non-sliced Data) — confirm buffer.original.
        guard let name = String(data: raw[nameOff ..< nameEnd], encoding: .ascii) else { continue }
        // foff = n_value - base_va
        let foff = Int(Int64(bitPattern: nValue) - Int64(bitPattern: baseVA))
        if foff >= 0, foff < size { symbols[name] = foff }
    }
}
/// Look up a function symbol, return file offset or nil.
/// Requires `buildSymbolTable()` to have populated `symbols` first.
func resolveSymbol(_ name: String) -> Int? {
    symbols[name]
}
// MARK: - Code Cave
/// Find a region of zeros/0xFF/UDF in executable memory for shellcode.
/// Only searches __TEXT_EXEC and __TEXT_BOOT_EXEC segments.
/// Reads from buffer.data (mutable) so previously allocated caves are skipped.
///
/// - Parameters:
///   - size: Number of bytes the cave must hold.
///   - align: Required alignment of the returned offset (default 4).
/// - Returns: File offset of a free region, aligned to `align`, with at
///   least `size` free bytes from that offset; nil when none exists.
func findCodeCave(size: Int, align: Int = 4) -> Int? {
    let execSegNames: Set = ["__TEXT_EXEC", "__TEXT_BOOT_EXEC"]
    // Collect exec segment ranges from parsed segments
    var execRanges: [(start: Int, end: Int)] = []
    for seg in segments {
        guard execSegNames.contains(seg.name), seg.fileSize > 0 else { continue }
        execRanges.append((Int(seg.fileOffset), Int(seg.fileOffset + seg.fileSize)))
    }
    // Fall back to codeRanges if no explicit exec segment found
    if execRanges.isEmpty {
        execRanges = codeRanges
    }
    execRanges.sort { $0.start < $1.start }
    for (rngStart, rngEnd) in execRanges {
        var runStart = -1
        var runLen = 0
        var off = rngStart
        while off + 4 <= rngEnd {
            let val = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt32.self) }
            // Accept zeros, 0xFFFFFFFF, or UDF (0xD4200000)
            if val == 0x0000_0000 || val == 0xFFFF_FFFF || val == 0xD420_0000 {
                if runStart < 0 {
                    runStart = off
                    runLen = 4
                } else {
                    runLen += 4
                }
                // Honor `align` for the returned offset: round the run start
                // up inside the free run and require `size` bytes past that
                // point. (Previously `align` only padded `size`; the start
                // offset itself was never aligned.) For the default align=4
                // this is equivalent to the old `runLen >= needed` check.
                let alignedStart = (runStart + align - 1) / align * align
                if runStart + runLen - alignedStart >= size {
                    return alignedStart
                }
            } else {
                runStart = -1
                runLen = 0
            }
            off += 4
        }
    }
    return nil
}
// MARK: - Branch Encoding
/// Encode an unconditional B instruction.
func encodeB(from fromOff: Int, to toOff: Int) -> Data? {
ARM64Encoder.encodeB(from: fromOff, to: toOff)
}
/// Encode a BL instruction.
func encodeBL(from fromOff: Int, to toOff: Int) -> Data? {
ARM64Encoder.encodeBL(from: fromOff, to: toOff)
}
// MARK: - Function Finders
/// Find the end of a function by scanning forward for the next PACIBSP boundary.
///
/// Reads from `buffer.original` (Python: `_rd32(self.raw, off)`).
/// Mirrors Python `_find_func_end(func_start, max_size)`.
func findFuncEnd(_ funcStart: Int, maxSize: Int = 0x4000) -> Int {
let raw = buffer.original
let limit = min(funcStart + maxSize, raw.count)
var off = funcStart + 4
while off + 4 <= limit {
let insn: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt32.self) }
if insn == ARM64.pacibspU32 { return off }
off += 4
}
return limit
}
/// Find the first BL to `_panic` in `range`. Returns the file offset or nil.
///
/// Reads from `buffer.original` (Python: `_rd32(self.raw, off)` via `_is_bl`).
/// Mirrors Python `_find_bl_to_panic_in_range(start, end)`.
func findBLToPanic(in range: Range<Int>) -> Int? {
guard let panicOff = panicOffset else { return nil }
let raw = buffer.original
var off = range.lowerBound
while off + 4 <= range.upperBound {
guard off + 4 <= raw.count else { break }
let insn: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt32.self) }
if insn >> 26 == 0b100101 { // BL
let imm26 = insn & 0x03FF_FFFF
let signedImm = Int32(bitPattern: imm26 << 6) >> 6
if off + Int(signedImm) * 4 == panicOff { return off }
}
off += 4
}
return nil
}
/// Find a function that references a given string constant.
/// Returns the function-start file offset, or nil.
/// Mirrors Python `_find_func_by_string(string, code_range)`.
func findFuncByString(_ string: String, codeRange: (Int, Int)? = nil) -> Int? {
guard let strOff = buffer.findString(string) else { return nil }
let refs: [(adrpOff: Int, addOff: Int)] = if let (cs, ce) = codeRange {
findStringRefs(strOff, in: (start: cs, end: ce))
} else {
findStringRefs(strOff)
}
guard let firstRef = refs.first else { return nil }
return findFunctionStart(firstRef.adrpOff)
}
/// Find a function containing a string reference.
/// Returns (funcStart, funcEnd, refs) or nil.
/// Mirrors Python `_find_func_containing_string(string, code_range)`.
func findFuncContainingString(
_ string: String,
codeRange: (Int, Int)? = nil
) -> (Int, Int, [(adrpOff: Int, addOff: Int)])? {
guard let strOff = buffer.findString(string) else { return nil }
let refs: [(adrpOff: Int, addOff: Int)] = if let (cs, ce) = codeRange {
findStringRefs(strOff, in: (start: cs, end: ce))
} else {
findStringRefs(strOff)
}
guard let firstRef = refs.first else { return nil }
guard let funcStart = findFunctionStart(firstRef.adrpOff) else { return nil }
let funcEnd = findFuncEnd(funcStart)
return (funcStart, funcEnd, refs)
}
/// Find `_nosys`: a tiny function returning ENOSYS (errno 78 = 0x4e).
///
/// Pattern A: `mov w0, #0x4e ; ret`
/// Pattern B: `pacibsp ; mov w0, #0x4e ; ret` (ARM64e wrapper)
///
/// Reads from `buffer.original` (Python: `_rd32(self.raw, off)`).
/// Mirrors Python `_find_nosys()`.
func findNosys() -> Int? {
let movW0_4e: UInt32 = 0x5280_09C0 // MOVZ W0, #0x4e
let retVal: UInt32 = ARM64.retU32
let pacibsp: UInt32 = ARM64.pacibspU32
let raw = buffer.original
for (start, end) in codeRanges {
var off = start
while off + 8 <= end {
let v0: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt32.self) }
let v1: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off + 4, as: UInt32.self) }
if v0 == movW0_4e, v1 == retVal { return off }
if v0 == pacibsp, v1 == movW0_4e, off + 12 <= end {
let v2: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off + 8, as: UInt32.self) }
if v2 == retVal { return off }
}
off += 4
}
}
return nil
}
// MARK: - Proc Info Anchor
/// Find the `_proc_info` switch anchor as (func_start, switch_off). Cached.
///
/// Shared by B6/B7 patches. Expensive on stripped kernels so result is memoised.
///
/// Anchor pattern:
/// `sub wN, wM, #1` zero-base the command index
/// `cmp wN, #0x21` bounds-check against the switch table size
///
/// Search order: direct symbol full kern_text scan.
/// Mirrors Python `_find_proc_info_anchor()`.
func findProcInfoAnchor() -> (Int, Int)? {
if procInfoAnchorScanned { return procInfoAnchor }
procInfoAnchorScanned = true
// Fast path: symbol table hit
if let funcOff = resolveSymbol("_proc_info") {
let searchEnd = min(funcOff + 0x800, buffer.count)
let switchOff = scanProcInfoSwitchPattern(start: funcOff, end: searchEnd)
procInfoAnchor = (funcOff, switchOff ?? funcOff)
return procInfoAnchor
}
// Slow path: scan __TEXT_EXEC
guard let (ks, ke) = kernTextRange else { return nil }
guard let switchOff = scanProcInfoSwitchPattern(start: ks, end: ke) else { return nil }
let funcStart = findFunctionStart(switchOff) ?? switchOff
procInfoAnchor = (funcStart, switchOff)
return procInfoAnchor
}
/// Raw scanner for `sub wN, wM, #1 ; cmp wN, #0x21`. Result is memoised in `jbScanCache`.
private func scanProcInfoSwitchPattern(start: Int, end: Int) -> Int? {
let cacheKey = "proc_info_switch_\(start)_\(end)"
if let cached = jbScanCache[cacheKey] { return cached >= 0 ? cached : nil }
let raw = buffer.original
let limit = min(end - 8, raw.count - 8)
var off = max(start, 0)
while off <= limit {
let i0: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt32.self) }
// SUB (immediate) 32-bit: [31:24]==0x51, sh[22]==0, imm12[21:10]==1
guard (i0 & 0xFF00_0000) == 0x5100_0000 else { off += 4; continue }
guard (i0 >> 22) & 1 == 0 else { off += 4; continue }
guard (i0 >> 10) & 0xFFF == 1 else { off += 4; continue }
let subRd = i0 & 0x1F
let i1: UInt32 = raw.withUnsafeBytes { $0.load(fromByteOffset: off + 4, as: UInt32.self) }
// CMP wN, #imm SUBS WZR, wN, #imm: [31:24]==0x71, rd==31, sh==0, imm12==0x21
guard (i1 & 0xFF00_001F) == 0x7100_001F else { off += 4; continue }
guard (i1 >> 22) & 1 == 0 else { off += 4; continue }
guard (i1 >> 10) & 0xFFF == 0x21 else { off += 4; continue }
guard (i1 >> 5) & 0x1F == subRd else { off += 4; continue }
jbScanCache[cacheKey] = off
return off
}
jbScanCache[cacheKey] = -1
return nil
}
// MARK: - Convenience Properties
/// The `__TEXT_EXEC` range as (fileOffsetStart, fileOffsetEnd), or nil.
/// Equivalent to Python `self.kern_text`.
var kernTextRange: (Int, Int)? {
if let seg = segments.first(where: { $0.name == "__TEXT_EXEC" }), seg.fileSize > 0 {
return (Int(seg.fileOffset), Int(seg.fileOffset + seg.fileSize))
}
return codeRanges.first.map { ($0.start, $0.end) }
}
// MARK: - Disassemble Helper
/// Disassemble one instruction at file offset in the mutable buffer.
func disasAt(_ off: Int) -> Instruction? {
guard off >= 0, off + 4 <= buffer.count else { return nil }
return disasm.disassembleOne(in: buffer.data, at: off)
}
// MARK: - BL Decode Helper
/// Decode the BL target at `offset`, or nil if not a BL.
func jbDecodeBL(at offset: Int) -> Int? {
guard offset + 4 <= buffer.count else { return nil }
let insn = buffer.readU32(at: offset)
guard insn >> 26 == 0b100101 else { return nil }
let imm26 = insn & 0x03FF_FFFF
let signedImm = Int32(bitPattern: imm26 << 6) >> 6
return offset + Int(signedImm) * 4
}
/// Decode unconditional B target at `offset`, or nil if not a B.
func jbDecodeBBranch(at offset: Int) -> Int? {
guard offset + 4 <= buffer.count else { return nil }
let insn = buffer.readU32(at: offset)
guard (insn & 0x7C00_0000) == 0x1400_0000 else { return nil }
let imm26 = insn & 0x03FF_FFFF
let signedImm = Int32(bitPattern: imm26 << 6) >> 6
return offset + Int(signedImm) * 4
}
// MARK: - Chained Pointer Decode
/// Decode an arm64e auth-rebase chained fixup pointer to a file offset.
/// Returns -1 if not an auth-rebase pointer or decode fails.
func decodeChainedPtr(_ raw: UInt64) -> Int {
guard (raw & (1 << 63)) != 0 else { return -1 }
let target = Int(raw & 0x3FFF_FFFF)
guard target > 0, target < buffer.count else { return -1 }
return target
}
// MARK: - Shared code-range / branch helpers (Group B patches)
/// Return true if `offset` falls within any known code range.
func jbIsInCodeRange(_ offset: Int) -> Bool {
codeRanges.contains { offset >= $0.start && offset < $0.end }
}
/// Decode a B, BL, B.cond, CBZ, or CBNZ target at `offset`.
/// Returns (targetFileOffset, isCond) or nil if not a branch.
func jbDecodeBranchTarget(at offset: Int) -> (target: Int, isCond: Bool)? {
guard offset >= 0, offset + 4 <= buffer.count else { return nil }
let insn = buffer.readU32(at: offset)
let op6 = insn >> 26
// Unconditional B
if op6 == 0b000101 {
let imm26 = insn & 0x03FF_FFFF
let signedImm = Int32(bitPattern: imm26 << 6) >> 6
return (offset + Int(signedImm) * 4, false)
}
// BL
if op6 == 0b100101 {
let imm26 = insn & 0x03FF_FFFF
let signedImm = Int32(bitPattern: imm26 << 6) >> 6
return (offset + Int(signedImm) * 4, false)
}
// B.cond: [31:24]=0x54, bit[4]=0
if (insn >> 24) == 0x54, (insn & 0x10) == 0 {
let imm19 = Int32(bitPattern: ((insn >> 5) & 0x7FFFF) << 13) >> 13
return (offset + Int(imm19) * 4, true)
}
// CBZ/CBNZ: [31:25] = 0b011010x or 0b111010x
let op7 = insn >> 25
if op7 == 0b0110100 || op7 == 0b0110101 || op7 == 0b1110100 || op7 == 0b1110101 {
let imm19 = Int32(bitPattern: ((insn >> 5) & 0x7FFFF) << 13) >> 13
return (offset + Int(imm19) * 4, true)
}
return nil
}
}

View File

@@ -0,0 +1,53 @@
// KernelPatcher.swift Regular kernel patcher orchestrator (26 patches).
//
// Swift equivalent of scripts/patchers/kernel.py.
// Each patch method is defined as an extension in its own file under Patches/.
import Foundation
/// Regular kernel patcher for iOS prelinked kernelcaches.
///
/// Patches are applied in the same order as the Python reference implementation.
/// Each patch method is an extension in a separate file under `Kernel/Patches/`.
public final class KernelPatcher: KernelPatcherBase, Patcher {
    public let component = "kernelcache"

    // MARK: - Find All

    /// Parse the kernelcache, build the lookup indices, then run every patch
    /// finder in the fixed reference order.
    /// - Returns: The collected patch records (also retained in `patches`).
    public func findAll() throws -> [PatchRecord] {
        patches = []

        // Parse Mach-O structure and build indices.
        parseMachO()
        buildADRPIndex()
        buildBLIndex()
        findPanic()

        // Patch finders in the same order as the Python find_all.
        // Trailing numbers are the reference patch indices.
        let finders: [() -> Void] = [
            { _ = self.patchApfsRootSnapshot() }, // 1
            { _ = self.patchApfsSealBroken() }, // 2
            { _ = self.patchBsdInitRootvp() }, // 3
            { _ = self.patchLaunchConstraints() }, // 4-5
            { _ = self.patchDebugger() }, // 6-7
            { _ = self.patchPostValidationNOP() }, // 8
            { _ = self.patchPostValidationCMP() }, // 9
            { _ = self.patchDyldPolicy() }, // 10-11
            { _ = self.patchApfsGraft() }, // 12
            { _ = self.patchApfsMount() }, // 13-15
            { _ = self.patchSandbox() }, // 16-25
        ]
        for finder in finders { finder() }
        return patches
    }

    /// Run `findAll()` and write every discovered patch into the buffer.
    /// - Returns: The number of patches applied (0 when none were found).
    @discardableResult
    public func apply() throws -> Int {
        let records = try findAll()
        if records.isEmpty {
            log(" [!] No kernel patches found")
            return 0
        }
        let count = applyPatches()
        log("\n [\(count) kernel patches applied]")
        return count
    }
}

View File

@@ -0,0 +1,495 @@
// KernelPatcherBase.swift Base infrastructure for kernel patching.
//
// Provides Mach-O parsing, ADRP/BL index building, string reference search,
// kext range discovery, and the emit() system.
// Swift equivalent of scripts/patchers/kernel_base.py.
import Capstone
import Foundation
/// Base class for kernel patchers providing shared infrastructure.
///
/// Owns the mutable working buffer plus every derived index (segments,
/// sections, ADRP page index, BL call-graph index) that the concrete patch
/// extensions build on. Swift port of scripts/patchers/kernel_base.py.
open class KernelPatcherBase {
    // MARK: - Properties

    /// Mutable working buffer.
    public let buffer: BinaryBuffer
    /// Verbose logging.
    public let verbose: Bool
    /// Collected patch records.
    public var patches: [PatchRecord] = []
    /// Base virtual address of the kernelcache __TEXT segment.
    public var baseVA: UInt64 = 0
    /// Code ranges (file offset start, end) for scanning.
    public var codeRanges: [(start: Int, end: Int)] = []
    /// All parsed segments.
    public var segments: [MachOSegmentInfo] = []
    /// Parsed sections keyed by "segment,section".
    public var sections: [String: MachOSectionInfo] = [:]
    /// ADRP index: page address -> [file offsets of ADRP instructions].
    public var adrpIndex: [UInt64: [Int]] = [:]
    /// BL index: target file offset -> [caller file offsets].
    public var blIndex: [Int: [Int]] = [:]
    /// Cached panic function file offset.
    public var panicOffset: Int?
    /// Disassembler instance.
    public let disasm = ARM64Disassembler()

    // MARK: - Init

    /// - Parameters:
    ///   - data: Raw kernelcache bytes; wrapped in the mutable working buffer.
    ///   - verbose: When true, `emit()` and `findPanic()` print progress lines.
    public init(data: Data, verbose: Bool = true) {
        buffer = BinaryBuffer(data)
        self.verbose = verbose
    }

    // MARK: - Mach-O Parsing

    /// Parse the Mach-O structure and build indices.
    ///
    /// Populates `segments`, `sections`, `baseVA` (from __TEXT), and seeds
    /// `codeRanges` from __TEXT_EXEC, falling back to __TEXT,__text.
    public func parseMachO() {
        segments = MachOParser.parseSegments(from: buffer.data)
        sections = MachOParser.parseSections(from: buffer.data)
        // Find base VA from __TEXT segment
        if let textSeg = segments.first(where: { $0.name == "__TEXT" }) {
            baseVA = textSeg.vmAddr
        }
        // Build code ranges from __TEXT_EXEC or __TEXT,__text
        if let textExec = segments.first(where: { $0.name == "__TEXT_EXEC" }) {
            codeRanges.append((Int(textExec.fileOffset), Int(textExec.fileOffset + textExec.fileSize)))
        } else if let textText = sections["__TEXT,__text"] {
            codeRanges.append((Int(textText.fileOffset), Int(textText.fileOffset) + Int(textText.size)))
        }
    }

    // MARK: - Emit System

    /// Record a patch at the given file offset.
    ///
    /// Only records: nothing is written to the buffer until `applyPatches()`.
    /// Captures before/after disassembly for the record and, when verbose,
    /// prints a one-line summary.
    public func emit(
        _ offset: Int,
        _ patchBytes: Data,
        patchID: String,
        virtualAddress: UInt64? = nil,
        description: String
    ) {
        let originalBytes = buffer.readBytes(at: offset, count: patchBytes.count)
        // Disassemble before/after
        let beforeInsn = disasm.disassembleOne(in: buffer.original, at: offset)
        let afterInsn = disasm.disassembleOne(patchBytes, at: UInt64(offset))
        let beforeStr = beforeInsn.map { "\($0.mnemonic) \($0.operandString)" } ?? "???"
        let afterStr = afterInsn.map { "\($0.mnemonic) \($0.operandString)" } ?? "???"
        let record = PatchRecord(
            patchID: patchID,
            component: "kernelcache",
            fileOffset: offset,
            virtualAddress: virtualAddress,
            originalBytes: originalBytes,
            patchedBytes: patchBytes,
            beforeDisasm: beforeStr,
            afterDisasm: afterStr,
            description: description
        )
        patches.append(record)
        if verbose {
            // NOTE(review): there is no separator between the before and after
            // disassembly in this log line — possibly a lost arrow glyph;
            // confirm the intended output format.
            print(" 0x\(String(format: "%06X", offset)): \(beforeStr)\(afterStr) [\(description)]")
        }
    }

    /// Apply all collected patches to the buffer.
    /// - Returns: The number of patch records written.
    public func applyPatches() -> Int {
        for record in patches {
            buffer.writeBytes(at: record.fileOffset, bytes: record.patchedBytes)
        }
        return patches.count
    }

    // MARK: - Index Building

    /// Build ADRP index for O(1) page-address lookups.
    ///
    /// Keys are file-offset-based page addresses (PC page + decoded delta),
    /// matching the file-offset string positions used by `findStringRefs`.
    public func buildADRPIndex() {
        adrpIndex = [:]
        for (start, end) in codeRanges {
            var offset = start
            while offset + 4 <= end {
                let insn = buffer.readU32(at: offset)
                // ADRP: [31]=1, [28:24]=10000
                if insn & 0x9F00_0000 == 0x9000_0000 {
                    // Decode page address
                    let immhi = (insn >> 5) & 0x7FFFF
                    let immlo = (insn >> 29) & 0x3
                    let imm21 = (immhi << 2) | immlo
                    // Sign-extend the 21-bit immediate (21 + 11 == 32).
                    let signedImm = Int64(Int32(bitPattern: imm21 << 11) >> 11)
                    let pageAddr = (UInt64(offset) & ~0xFFF) &+ UInt64(bitPattern: signedImm << 12)
                    adrpIndex[pageAddr, default: []].append(offset)
                }
                offset += 4
            }
        }
    }

    /// Build BL index for target-to-callers mapping.
    public func buildBLIndex() {
        blIndex = [:]
        for (start, end) in codeRanges {
            var offset = start
            while offset + 4 <= end {
                let insn = buffer.readU32(at: offset)
                // BL: [31:26] = 100101
                if insn >> 26 == 0b100101 {
                    let imm26 = insn & 0x03FF_FFFF
                    // Sign-extend imm26 (26 + 6 == 32).
                    let signedImm = Int32(bitPattern: imm26 << 6) >> 6
                    let target = offset + Int(signedImm) * 4
                    blIndex[target, default: []].append(offset)
                }
                offset += 4
            }
        }
    }

    // MARK: - String Reference Search

    /// Find all ADRP+ADD references to a string at the given file offset.
    ///
    /// Looks up candidate ADRP sites through `adrpIndex`, then accepts the
    /// first ADD (immediate) within the next 8 instructions whose source
    /// register matches the ADRP destination and whose imm12 equals the
    /// string's in-page offset.
    public func findStringRefs(_ stringOffset: Int) -> [(adrpOff: Int, addOff: Int)] {
        let targetPage = UInt64(stringOffset) & ~0xFFF
        let pageOff = UInt64(stringOffset) & 0xFFF
        guard let adrpOffsets = adrpIndex[targetPage] else { return [] }
        var refs: [(Int, Int)] = []
        for adrpOff in adrpOffsets {
            // Check the next few instructions for ADD with matching page offset
            for delta in stride(from: 4, through: 32, by: 4) {
                let addCandOff = adrpOff + delta
                guard addCandOff + 4 <= buffer.count else { break }
                let addInsn = buffer.readU32(at: addCandOff)
                // ADD immediate: [31]=1, [30:29]=00, [28:24]=10001
                // NOTE(review): the mask does not constrain the shift bit
                // (sh, bit 22); an sh=1 ADD with a matching imm12 would
                // false-positive — rare in practice, but confirm.
                guard addInsn & 0xFF80_0000 == 0x9100_0000 else { continue }
                let imm12 = (addInsn >> 10) & 0xFFF
                let adrpInsn = buffer.readU32(at: adrpOff)
                let adrpRd = adrpInsn & 0x1F
                let addRn = (addInsn >> 5) & 0x1F
                guard adrpRd == addRn, imm12 == UInt32(pageOff) else { continue }
                refs.append((adrpOff, addCandOff))
                break
            }
        }
        return refs
    }

    /// Find all ADRP+ADD references to a string at the given file offset,
    /// filtered to a code range (start inclusive, end exclusive).
    public func findStringRefs(_ stringOffset: Int, in range: (start: Int, end: Int)) -> [(adrpOff: Int, addOff: Int)] {
        findStringRefs(stringOffset).filter { $0.adrpOff >= range.start && $0.adrpOff < range.end }
    }

    /// Convenience: find a string then return ranged ADRP+ADD refs.
    /// Returns empty if the string is not found or has no refs in range.
    public func findStringRefs(in range: (start: Int, end: Int), string: String) -> [(adrpOff: Int, addOff: Int)] {
        guard let strOff = buffer.findString(string) else { return [] }
        return findStringRefs(strOff, in: range)
    }

    /// Convenience: find a string by file offset, with range filter.
    public func findStringRefs(in range: (start: Int, end: Int), stringOffset: Int) -> [(adrpOff: Int, addOff: Int)] {
        findStringRefs(stringOffset, in: range)
    }

    // MARK: - Branch Helpers

    /// Check whether the instruction at `offset` is a BL targeting `target` (file offset).
    /// Returns true if the BL opcode decodes to the exact target offset.
    public func isBL(at offset: Int, target: Int) -> Bool {
        guard offset + 4 <= buffer.count else { return false }
        let insn = buffer.readU32(at: offset)
        // BL encoding: [31:26] = 0b100101
        guard insn >> 26 == 0b100101 else { return false }
        let imm26 = insn & 0x03FF_FFFF
        // Sign-extend imm26 (26 + 6 == 32).
        let signedImm = Int32(bitPattern: imm26 << 6) >> 6
        let resolved = offset + Int(signedImm) * 4
        return resolved == target
    }

    // MARK: - Conditional Branch Helpers

    /// Set of conditional branch mnemonics that may gate a panic path.
    static let conditionalBranchMnemonics: Set<String> = [
        "b.eq", "b.ne", "b.cs", "b.hs", "b.cc", "b.lo", "b.mi", "b.pl",
        "b.vs", "b.vc", "b.hi", "b.ls", "b.ge", "b.lt", "b.gt", "b.le", "b.al",
        "cbz", "cbnz", "tbz", "tbnz",
    ]

    /// Decode a conditional branch instruction, returning its target as a file offset.
    /// Returns nil if the instruction is not a conditional branch.
    ///
    /// NOTE(review): returns the last IMM operand as-is; whether that value is
    /// a file offset depends on the address base the disassembler was given —
    /// confirm against `ARM64Disassembler` usage.
    public func conditionalBranchTarget(insn: Instruction) -> Int? {
        guard KernelPatcherBase.conditionalBranchMnemonics.contains(insn.mnemonic) else { return nil }
        // Target is always the last IMM operand.
        guard let detail = insn.aarch64 else { return nil }
        for op in detail.operands.reversed() {
            if op.type == AARCH64_OP_IMM {
                return Int(op.imm)
            }
        }
        return nil
    }

    // MARK: - Panic Discovery

    /// Find _panic: the most-called function whose callers reference '@%s:%d' strings.
    /// Populates `panicOffset`.
    ///
    /// Heuristic: among the 15 most-called BL targets with at least 2000
    /// callers, sample up to 30 call sites each and require 3 of them to load
    /// a format string containing "@%s:%d" (or "%s:%d") into x0.
    public func findPanic() {
        // Sort targets by call-site count, descending.
        let sorted = blIndex.sorted { $0.value.count > $1.value.count }
        for (targetOff, callers) in sorted.prefix(15) {
            guard callers.count >= 2000 else { break }
            var confirmed = 0
            for callerOff in callers.prefix(30) {
                // Look back up to 8 instructions for ADRP x0 + ADD x0 pattern
                // pointing at a string containing "@%s:%d".
                var back = callerOff - 4
                while back >= max(callerOff - 32, 0) {
                    let addInsn = buffer.readU32(at: back)
                    // ADD Xd, x0, #imm — the mask pins Rn to x0.
                    // NOTE(review): Rd (bits 4:0) is NOT constrained by the
                    // mask even though the original note claimed x0 — confirm
                    // whether that was intended.
                    if (addInsn & 0xFFC0_03E0) == 0x9100_0000 {
                        let addImm = Int((addInsn >> 10) & 0xFFF)
                        if back >= 4 {
                            let adrpInsn = buffer.readU32(at: back - 4)
                            // ADRP x0: [31:24]=10010000, [4:0]=0 (x0)
                            if (adrpInsn & 0x9F00_001F) == 0x9000_0000 {
                                let immhi = (adrpInsn >> 5) & 0x7FFFF
                                let immlo = (adrpInsn >> 29) & 0x3
                                var imm = Int((immhi << 2) | immlo)
                                // Sign-extend the 21-bit page delta.
                                if imm & (1 << 20) != 0 { imm -= 1 << 21 }
                                let pageDelta = imm << 12
                                let pcPage = (back - 4) & ~0xFFF
                                let strFoff = pcPage + pageDelta + addImm
                                if strFoff >= 0, strFoff + 60 < buffer.count {
                                    let snippet = buffer.data[strFoff ..< strFoff + 60]
                                    if snippet.range(of: Data("@%s:%d".utf8)) != nil ||
                                        snippet.range(of: Data("%s:%d".utf8)) != nil
                                    {
                                        confirmed += 1
                                        break
                                    }
                                }
                            }
                        }
                        // Stop at the first ADD-x0 candidate either way.
                        break
                    }
                    back -= 4
                }
                if confirmed >= 3 { break }
            }
            if confirmed >= 3 {
                panicOffset = targetOff
                if verbose { print(String(format: " [*] _panic at foff 0x%X (%d callers)", targetOff, callers.count)) }
                return
            }
        }
        // Fallback: use the 3rd most-called target (index 2), like Python.
        // NOTE(review): ties in caller counts make this choice nondeterministic
        // across runs (dictionary iteration order) — confirm acceptable.
        if sorted.count > 2 {
            panicOffset = sorted[2].key
        } else if let first = sorted.first {
            panicOffset = first.key
        }
        if let p = panicOffset {
            if verbose { print(String(format: " [*] _panic (fallback) at foff 0x%X", p)) }
        }
    }

    // MARK: - Function Discovery

    /// Find the start of the function containing the instruction at `offset`.
    /// Scans backward for PACIBSP or STP x29, x30, [sp, ...].
    ///
    /// - Parameters:
    ///   - offset: File offset assumed to be inside the function body.
    ///   - maxBack: Maximum distance to scan backward before giving up.
    /// - Returns: File offset of the detected prologue start, or nil.
    public func findFunctionStart(_ offset: Int, maxBack: Int = 0x4000) -> Int? {
        let stop = max(0, offset - maxBack)
        var scan = offset - 4
        // Word-align the starting point.
        scan &= ~3
        while scan > stop {
            let insn = buffer.readU32(at: scan)
            if insn == ARM64.pacibspU32 {
                return scan
            }
            // STP x29, x30, [sp, #imm] (common prologue)
            // Encoding: 1x101001xx011101_11110xxxxxxxxxxx
            if insn & 0x7FC0_7FFF == 0x2900_7BFD {
                // Check further back for PACIBSP (prologue may have
                // multiple STP instructions before x29,x30)
                let innerStop = max(0, scan - 0x24)
                var k = scan - 4
                while k > innerStop {
                    if buffer.readU32(at: k) == ARM64.pacibspU32 {
                        return k
                    }
                    k -= 4
                }
                return scan
            }
            scan -= 4
        }
        return nil
    }

    // MARK: - VA/Offset Conversion

    /// Convert file offset to virtual address.
    /// - Returns: The VA within the containing segment, or nil when no
    ///   segment covers `offset`.
    public func fileOffsetToVA(_ offset: Int) -> UInt64? {
        for seg in segments {
            let segStart = Int(seg.fileOffset)
            let segEnd = segStart + Int(seg.fileSize)
            if offset >= segStart, offset < segEnd {
                return seg.vmAddr + UInt64(offset - segStart)
            }
        }
        return nil
    }

    /// Convert virtual address to file offset.
    public func vaToFileOffset(_ va: UInt64) -> Int? {
        MachOParser.vaToFileOffset(va, segments: segments)
    }

    // MARK: - Kext Range Discovery

    /// File-offset range (start, end) of the AMFI kext's __TEXT_EXEC.__text section.
    ///
    /// Discovered from __PRELINK_INFO. Falls back to the full kernel code range.
    public func amfiTextRange() -> (start: Int, end: Int) {
        kextTextRange(bundleID: "com.apple.driver.AppleMobileFileIntegrity")
    }

    /// File-offset range (start, end) of the Sandbox kext's __TEXT_EXEC.__text section.
    ///
    /// Discovered from __PRELINK_INFO. Falls back to the full kernel code range.
    public func sandboxTextRange() -> (start: Int, end: Int) {
        kextTextRange(bundleID: "com.apple.security.sandbox")
    }

    /// File-offset range (start, end) of the APFS kext's __TEXT_EXEC.__text section.
    ///
    /// Discovered from __PRELINK_INFO. Falls back to the full kernel code range.
    public func apfsTextRange() -> (start: Int, end: Int) {
        kextTextRange(bundleID: "com.apple.filesystems.apfs")
    }

    /// Generic kext text range lookup by bundle identifier.
    ///
    /// Parses __PRELINK_INFO to find the kext's load address, then reads its
    /// embedded Mach-O to extract the __TEXT_EXEC.__text section range.
    /// Falls back to the full kernel code range on any failure.
    ///
    /// NOTE(review): `execAddr - baseVA` treats the VA delta as a file offset,
    /// i.e. assumes a flat VA-to-file mapping from __TEXT — confirm this holds
    /// for all supported kernelcache layouts.
    public func kextTextRange(bundleID: String) -> (start: Int, end: Int) {
        guard let prelinkSeg = segments.first(where: { $0.name == "__PRELINK_INFO" }),
              prelinkSeg.fileSize > 0
        else {
            return codeRanges.first ?? (0, buffer.count)
        }
        let pFoff = Int(prelinkSeg.fileOffset)
        let pEnd = min(pFoff + Int(prelinkSeg.fileSize), buffer.count)
        let prelinkSlice = buffer.data[pFoff ..< pEnd]
        guard let xmlStart = prelinkSlice.range(of: Data("<?xml".utf8)),
              let plistEnd = prelinkSlice.range(of: Data("</plist>".utf8))
        else {
            return codeRanges.first ?? (0, buffer.count)
        }
        let xmlData = prelinkSlice[xmlStart.lowerBound ..< plistEnd.upperBound]
        guard let plist = try? PropertyListSerialization.propertyList(from: xmlData, format: nil),
              let dict = plist as? [String: Any],
              let items = dict["_PrelinkInfoDictionary"] as? [[String: Any]]
        else {
            return codeRanges.first ?? (0, buffer.count)
        }
        for item in items {
            guard let bid = item["CFBundleIdentifier"] as? String,
                  bid == bundleID,
                  let execAddrAny = item["_PrelinkExecutableLoadAddr"]
            else { continue }
            // Plist integers may decode as UInt64, Int, or NSNumber.
            var execAddr: UInt64 = 0
            if let n = execAddrAny as? UInt64 { execAddr = n }
            else if let n = execAddrAny as? Int { execAddr = UInt64(bitPattern: Int64(n)) }
            else if let n = execAddrAny as? NSNumber { execAddr = n.uint64Value }
            // No-op mask (full 64 bits); kept for parity with the Python port.
            execAddr &= 0xFFFF_FFFF_FFFF_FFFF
            guard execAddr > baseVA else { continue }
            let kextFoff = Int(execAddr - baseVA)
            guard kextFoff >= 0, kextFoff < buffer.count else { continue }
            if let range = parseKextTextExecRange(at: kextFoff) {
                return range
            }
        }
        return codeRanges.first ?? (0, buffer.count)
    }

    /// Parse an embedded kext Mach-O at `kextFoff` and return its __TEXT_EXEC.__text range.
    ///
    /// Walks the load commands of the 64-bit Mach-O header (magic 0xFEEDFACF)
    /// looking for LC_SEGMENT_64 "__TEXT_EXEC"; prefers the "__text" section,
    /// falling back to the whole segment.
    /// NOTE(review): a corrupt cmdsize of 0 would loop without advancing for
    /// the remaining ncmds iterations — confirm inputs are trusted here.
    public func parseKextTextExecRange(at kextFoff: Int) -> (start: Int, end: Int)? {
        guard kextFoff + 32 <= buffer.count else { return nil }
        let magic: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: kextFoff, as: UInt32.self) }
        guard magic == 0xFEED_FACF else { return nil }
        let ncmds: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: kextFoff + 16, as: UInt32.self) }
        var off = kextFoff + 32
        for _ in 0 ..< ncmds {
            guard off + 8 <= buffer.count else { break }
            let cmd: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt32.self) }
            let cmdsize: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + 4, as: UInt32.self) }
            if cmd == 0x19 { // LC_SEGMENT_64
                let nameBytes = buffer.data[off + 8 ..< off + 24]
                let segName = String(data: nameBytes, encoding: .utf8)?
                    .trimmingCharacters(in: CharacterSet(charactersIn: "\0")) ?? ""
                if segName == "__TEXT_EXEC" {
                    let vmAddr: UInt64 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + 24, as: UInt64.self) }
                    let fileSize: UInt64 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + 48, as: UInt64.self) }
                    let nsects: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + 64, as: UInt32.self) }
                    var sectOff = off + 72
                    for _ in 0 ..< nsects {
                        guard sectOff + 80 <= buffer.count else { break }
                        let sectNameBytes = buffer.data[sectOff ..< sectOff + 16]
                        let sectName = String(data: sectNameBytes, encoding: .utf8)?
                            .trimmingCharacters(in: CharacterSet(charactersIn: "\0")) ?? ""
                        if sectName == "__text" {
                            let sectAddr: UInt64 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: sectOff + 32, as: UInt64.self) }
                            let sectSize: UInt64 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: sectOff + 40, as: UInt64.self) }
                            guard sectAddr >= baseVA else { break }
                            // NOTE(review): VA-to-file mapping assumed flat
                            // relative to baseVA — see kextTextRange.
                            let sectFoff = Int(sectAddr - baseVA)
                            return (sectFoff, sectFoff + Int(sectSize))
                        }
                        sectOff += 80
                    }
                    // Fallback: use the full segment.
                    guard vmAddr >= baseVA else { break }
                    let segFoff = Int(vmAddr - baseVA)
                    return (segFoff, segFoff + Int(fileSize))
                }
            }
            off += Int(cmdsize)
        }
        return nil
    }
}

View File

@@ -0,0 +1,121 @@
// KernelPatchApfsGraft.swift APFS graft patch (patch 12).
//
// Neutralizes root hash validation inside _apfs_graft by replacing the BL
// to validate_on_disk_root_hash with MOV W0, #0.
//
// Python source: scripts/patchers/kernel_patch_apfs_graft.py
import Foundation
extension KernelPatcher {
    // MARK: - Patch 12: _apfs_graft

    /// Patch 12: Replace the BL to validate_on_disk_root_hash inside _apfs_graft
    /// with `mov w0, #0`, bypassing root hash validation.
    ///
    /// Strategy:
    /// 1. Locate the `apfs_graft` C string (null-byte bounded) in the binary.
    /// 2. Follow ADRP+ADD references into code to find the _apfs_graft function start.
    /// 3. Locate validate_on_disk_root_hash via the `authenticate_root_hash` string.
    /// 4. Scan _apfs_graft for a BL whose resolved target is validate_on_disk_root_hash.
    /// 5. Emit MOV W0, #0 at that site.
    ///
    /// - Returns: true when the patch site was found and emitted.
    @discardableResult
    func patchApfsGraft() -> Bool {
        log("\n[12] _apfs_graft: mov w0,#0 (validate_root_hash BL)")
        // Step 1: Find the "apfs_graft" null-terminated C string.
        // Python: exact = self.raw.find(b"\x00apfs_graft\x00"); str_off = exact + 1
        guard let apfsGraftPattern = "apfs_graft".data(using: .utf8) else { return false }
        // Bound the match with null bytes on both sides so a longer symbol
        // containing "apfs_graft" as a substring cannot match.
        var nullPrefix = Data([0])
        nullPrefix.append(apfsGraftPattern)
        nullPrefix.append(0)
        guard let exactRange = buffer.data.range(of: nullPrefix) else {
            log(" [-] 'apfs_graft' string not found")
            return false
        }
        let strOff = exactRange.lowerBound + 1 // skip the leading null byte
        // Step 2: Find ADRP+ADD code references to that string.
        let refs = findStringRefs(strOff)
        guard let firstRef = refs.first else {
            log(" [-] no code refs to 'apfs_graft'")
            return false
        }
        // Step 3: Find the function start from the reference site.
        guard let graftStart = findFunctionStart(firstRef.adrpOff) else {
            log(" [-] _apfs_graft function start not found")
            return false
        }
        // Step 4: Locate validate_on_disk_root_hash via the `authenticate_root_hash` string.
        guard let vrhFunc = findValidateRootHashFunc() else {
            log(" [-] validate_on_disk_root_hash not found")
            return false
        }
        // Step 5: Scan _apfs_graft body for a BL whose target is vrhFunc.
        // Stop at PACIBSP (start of a new function). Mirror of Python logic:
        // for scan in range(graft_start, graft_start + 0x2000, 4):
        //   if scan > graft_start + 8 and rd32(scan) == PACIBSP: break
        //   if _is_bl(scan) == vrh_func: emit(scan, MOV_W0_0, ...)
        let scanEnd = min(graftStart + 0x2000, buffer.count - 4)
        var scan = graftStart
        while scan <= scanEnd {
            // The `+ 8` slack lets the scan step over the function's own
            // PACIBSP prologue without terminating immediately.
            if scan > graftStart + 8, buffer.readU32(at: scan) == ARM64.pacibspU32 {
                break
            }
            if let blTarget = decodeBLTarget(at: scan), blTarget == vrhFunc {
                let va = fileOffsetToVA(scan)
                emit(scan, ARM64.movW0_0,
                     patchID: "apfs_graft",
                     virtualAddress: va,
                     description: "mov w0,#0 [_apfs_graft]")
                return true
            }
            scan += 4
        }
        log(" [-] BL to validate_on_disk_root_hash not found in _apfs_graft")
        return false
    }

    // MARK: - Helpers

    /// Find validate_on_disk_root_hash by locating the `authenticate_root_hash` string
    /// and resolving the function that references it.
    /// - Returns: The function-start file offset, or nil.
    private func findValidateRootHashFunc() -> Int? {
        guard let authHashStr = "authenticate_root_hash".data(using: .utf8) else { return nil }
        var searchData = authHashStr
        searchData.append(0) // null terminator
        // Try with null terminator first, then without
        var strOff: Int?
        if let range = buffer.data.range(of: searchData) {
            strOff = range.lowerBound
        } else if let range = buffer.data.range(of: authHashStr) {
            strOff = range.lowerBound
        }
        guard let foundOff = strOff else { return nil }
        let refs = findStringRefs(foundOff)
        guard let firstRef = refs.first else { return nil }
        return findFunctionStart(firstRef.adrpOff)
    }

    /// Decode a BL instruction at the given file offset and return the target file offset,
    /// or nil if the instruction at that offset is not a BL.
    ///
    /// ARM64 BL encoding: bits [31:26] = 0b100101, bits [25:0] = signed imm26
    /// Target = PC + SignExt(imm26) * 4 (all in file-offset space)
    private func decodeBLTarget(at offset: Int) -> Int? {
        guard offset + 4 <= buffer.count else { return nil }
        let insn = buffer.readU32(at: offset)
        guard insn >> 26 == 0b100101 else { return nil } // BL opcode
        let imm26 = insn & 0x03FF_FFFF
        // Sign-extend imm26 via a 32-bit arithmetic shift (26 + 6 == 32).
        let signedImm = Int32(bitPattern: imm26 << 6) >> 6
        return offset + Int(signedImm) * 4
    }
}

View File

@@ -0,0 +1,415 @@
// KernelPatchApfsMount.swift APFS mount/dev-role patches (patches 13, 14, 15, 16).
//
// Python source: scripts/patchers/kernel_patch_apfs_mount.py
// scripts/patchers/kernel_patch_apfs_graft.py (patch_handle_fsioc_graft)
import Capstone
import Foundation
extension KernelPatcher {
// MARK: - Private Helpers
/// Decode a BL instruction at `offset`. Returns the target file offset, or nil.
private func apfsMountDecodeBL(at offset: Int) -> Int? {
guard offset + 4 <= buffer.count else { return nil }
let insn = buffer.readU32(at: offset)
guard insn >> 26 == 0b100101 else { return nil }
let imm26 = insn & 0x03FF_FFFF
let signedImm = Int32(bitPattern: imm26 << 6) >> 6
return offset + Int(signedImm) * 4
}
/// Return true when the function at `funcOff` hits a return instruction
/// (plain RET or a PAC variant) within its first `maxBytes` bytes — i.e. it
/// looks like a tiny leaf function.
private func apfsMountIsLeaf(at funcOff: Int, maxBytes: Int = 0x20) -> Bool {
    let end = min(funcOff + maxBytes, buffer.count)
    let returnWords: Set<UInt32> = [ARM64.retU32, ARM64.retaaU32, ARM64.retabU32]
    return stride(from: funcOff, through: end - 4, by: 4).contains { off in
        returnWords.contains(buffer.readU32(at: off))
    }
}
// MARK: - Patch 13: _apfs_vfsop_mount cmp x0, x0
/// Patch 13: Replace `cmp x0, Xm` with `cmp x0, x0` in _apfs_vfsop_mount.
///
/// The target CMP follows the pattern: BL (returns current_thread in x0),
/// ADRP + LDR + LDR (load kernel_task global), CMP x0, Xm, B.EQ.
/// We require x0 as the first CMP operand to distinguish it from other CMPs.
///
/// - Returns: true when exactly one CMP was rewritten; false when the anchor
///   string, its code refs, the BL callers, or the CMP pattern was not found.
@discardableResult
func patchApfsVfsopMountCmp() -> Bool {
    log("\n[13] _apfs_vfsop_mount: cmp x0,x0 (mount rw check)")
    let apfsRange = apfsTextRange()
    // Anchor: the function-name string embedded in the APFS kext.
    guard let strOff = buffer.findString("apfs_mount_upgrade_checks") else {
        log(" [-] 'apfs_mount_upgrade_checks' string not found")
        return false
    }
    let refs = findStringRefs(strOff, in: apfsRange)
    guard !refs.isEmpty else {
        log(" [-] no code refs to apfs_mount_upgrade_checks")
        return false
    }
    // Locate the function start of _apfs_mount_upgrade_checks.
    guard let funcStart = findFunctionStart(refs[0].adrpOff) else {
        log(" [-] function start not found for apfs_mount_upgrade_checks ref")
        return false
    }
    // Gather BL callers of that function.
    // NOTE(review): the funcStart + 4 retry presumably covers call targets
    // recorded just past a one-instruction prologue — confirm against how
    // blIndex keys its targets.
    var callers = blIndex[funcStart] ?? []
    if callers.isEmpty {
        callers = blIndex[funcStart + 4] ?? []
    }
    if callers.isEmpty {
        // Manual scan for BL callers when not in index.
        for (rangeStart, rangeEnd) in codeRanges {
            var off = rangeStart
            while off + 4 <= rangeEnd {
                if let target = apfsMountDecodeBL(at: off),
                   target >= funcStart, target <= funcStart + 4
                {
                    callers.append(off)
                }
                off += 4
            }
        }
    }
    guard !callers.isEmpty else {
        log(" [-] no BL callers of _apfs_mount_upgrade_checks found")
        return false
    }
    for callerOff in callers {
        // Only consider call sites inside the APFS kext text range.
        guard callerOff >= apfsRange.start, callerOff < apfsRange.end else { continue }
        // Scan the caller's whole function when its start is known; otherwise
        // fall back to a fixed 0x800-byte window before the call site.
        let callerFuncStart = findFunctionStart(callerOff)
        let scanStart = callerFuncStart ?? max(callerOff - 0x800, apfsRange.start)
        let scanEnd = min(callerOff + 0x100, apfsRange.end)
        var scan = scanStart
        while scan + 4 <= scanEnd {
            guard let insn = disasm.disassembleOne(in: buffer.data, at: scan),
                  insn.mnemonic == "cmp",
                  let detail = insn.aarch64,
                  detail.operands.count >= 2
            else {
                scan += 4; continue
            }
            let ops = detail.operands
            // Both operands must be registers.
            guard ops[0].type == AARCH64_OP_REG, ops[1].type == AARCH64_OP_REG else {
                scan += 4; continue
            }
            // First operand must be x0 (return value from BL current_thread).
            guard ops[0].reg == AARCH64_REG_X0 else {
                scan += 4; continue
            }
            // Skip CMP x0, x0 (already patched or trivially true).
            guard ops[0].reg != ops[1].reg else {
                scan += 4; continue
            }
            let va = fileOffsetToVA(scan)
            // cmp x0,x0 always sets EQ, so the following B.EQ always passes.
            emit(
                scan,
                ARM64.cmpX0X0,
                patchID: "kernel.apfs_vfsop_mount.cmp_x0_x0",
                virtualAddress: va,
                description: "cmp x0,x0 (was \(insn.mnemonic) \(insn.operandString)) [_apfs_vfsop_mount]"
            )
            return true
        }
    }
    log(" [-] CMP x0,Xm not found near mount_upgrade_checks caller")
    return false
}
// MARK: - Patch 14: _apfs_mount_upgrade_checks mov w0, #0
/// Patch 14: Replace `tbnz w0, #0xe, ...` with `mov w0, #0`.
///
/// Within the function a BL calls a small leaf flag-reading function, then
/// TBNZ w0, #0xe branches to the error path. Replace TBNZ with mov w0,#0.
///
/// - Returns: true when the TBNZ was replaced; false when the anchor string,
///   its refs, the enclosing function, or the BL+TBNZ pattern was not found.
@discardableResult
func patchApfsMountUpgradeChecks() -> Bool {
    log("\n[14] _apfs_mount_upgrade_checks: mov w0,#0 (tbnz bypass)")
    let apfsRange = apfsTextRange()
    guard let strOff = buffer.findString("apfs_mount_upgrade_checks") else {
        log(" [-] 'apfs_mount_upgrade_checks' string not found")
        return false
    }
    let refs = findStringRefs(strOff, in: apfsRange)
    guard !refs.isEmpty else {
        log(" [-] no code refs to apfs_mount_upgrade_checks")
        return false
    }
    // The first xref is assumed to sit inside _apfs_mount_upgrade_checks itself.
    guard let funcStart = findFunctionStart(refs[0].adrpOff) else {
        log(" [-] function start not found")
        return false
    }
    // Scan at most 0x200 bytes of the function body.
    let limit = min(funcStart + 0x200, buffer.count)
    var scan = funcStart
    while scan + 4 <= limit {
        // Stop at PACIBSP (new function boundary), but not at early returns.
        if scan > funcStart + 8, buffer.readU32(at: scan) == ARM64.pacibspU32 {
            break
        }
        guard let blTarget = apfsMountDecodeBL(at: scan) else {
            scan += 4; continue
        }
        // Target must be a small leaf function.
        guard apfsMountIsLeaf(at: blTarget) else {
            scan += 4; continue
        }
        // Next instruction must be TBNZ w0, #N (any bit).
        let nextOff = scan + 4
        guard nextOff + 4 <= buffer.count,
              let nextInsn = disasm.disassembleOne(in: buffer.data, at: nextOff),
              nextInsn.mnemonic == "tbnz",
              let detail = nextInsn.aarch64,
              !detail.operands.isEmpty,
              detail.operands[0].type == AARCH64_OP_REG,
              detail.operands[0].reg == AARCH64_REG_W0
        else {
            scan += 4; continue
        }
        let va = fileOffsetToVA(nextOff)
        // Clearing w0 both removes the branch and reports "no flag set".
        emit(
            nextOff,
            ARM64.movW0_0,
            patchID: "kernel.apfs_mount_upgrade_checks.mov_w0_0",
            virtualAddress: va,
            description: "mov w0,#0 [_apfs_mount_upgrade_checks]"
        )
        return true
    }
    log(" [-] BL + TBNZ w0 pattern not found")
    return false
}
// MARK: - Patch 15: _handle_fsioc_graft mov w0, #0
/// Patch 15: Replace the BL to `validate_payload_and_manifest` with `mov w0, #0`
/// inside `_handle_fsioc_graft`.
///
/// - Returns: true when the BL was rewritten; false when either function or
///   the call site could not be located.
@discardableResult
func patchHandleFsiocGraft() -> Bool {
    log("\n[15] _handle_fsioc_graft: mov w0,#0 (validate BL)")
    let apfsRange = apfsTextRange()
    // Locate "handle_fsioc_graft" string (expect surrounding NUL bytes).
    // Requiring NUL on both sides avoids matching a longer symbol that merely
    // contains the name as a substring.
    guard let raw = "handle_fsioc_graft".data(using: .utf8) else { return false }
    var searchPattern = Data([0x00])
    searchPattern.append(raw)
    searchPattern.append(0x00)
    guard let patternRange = buffer.data.range(of: searchPattern) else {
        log(" [-] 'handle_fsioc_graft' string not found")
        return false
    }
    let fsiocStrOff = patternRange.lowerBound + 1 // skip leading NUL
    let fsiocRefs = findStringRefs(fsiocStrOff, in: apfsRange)
    guard !fsiocRefs.isEmpty else {
        log(" [-] no code refs to handle_fsioc_graft string")
        return false
    }
    guard let fsiocStart = findFunctionStart(fsiocRefs[0].adrpOff) else {
        log(" [-] _handle_fsioc_graft function start not found")
        return false
    }
    // Locate validate_payload_and_manifest function start.
    guard let valStrOff = buffer.findString("validate_payload_and_manifest") else {
        log(" [-] 'validate_payload_and_manifest' string not found")
        return false
    }
    let valRefs = findStringRefs(valStrOff, in: apfsRange)
    guard !valRefs.isEmpty else {
        log(" [-] no code refs to validate_payload_and_manifest")
        return false
    }
    guard let valFunc = findFunctionStart(valRefs[0].adrpOff) else {
        log(" [-] validate_payload_and_manifest function start not found")
        return false
    }
    // Scan _handle_fsioc_graft for BL targeting valFunc.
    let scanEnd = min(fsiocStart + 0x400, buffer.count)
    var scan = fsiocStart
    while scan + 4 <= scanEnd {
        // PACIBSP past the entry marks the next function; stop there.
        if scan > fsiocStart + 8, buffer.readU32(at: scan) == ARM64.pacibspU32 {
            break
        }
        if isBL(at: scan, target: valFunc) {
            let va = fileOffsetToVA(scan)
            // mov w0,#0 stands in for a successful validation return value.
            emit(
                scan,
                ARM64.movW0_0,
                patchID: "kernel.handle_fsioc_graft.mov_w0_0",
                virtualAddress: va,
                description: "mov w0,#0 [_handle_fsioc_graft]"
            )
            return true
        }
        scan += 4
    }
    log(" [-] BL to validate_payload_and_manifest not found in _handle_fsioc_graft")
    return false
}
// MARK: - Patch 16: handle_get_dev_by_role bypass entitlement gate
/// Patch 16: NOP CBZ/CBNZ on X0/W0 that branch to entitlement-error blocks
/// in `handle_get_dev_by_role`.
///
/// Error blocks are identified by `mov w8, #0x332D` or `mov w8, #0x333B`
/// within the first 0x30 bytes (known entitlement-gate line IDs).
///
/// - Returns: true when at least one gate branch was NOPped; false otherwise.
@discardableResult
func patchHandleGetDevByRoleEntitlement() -> Bool {
    log("\n[16] handle_get_dev_by_role: bypass entitlement gate")
    let apfsRange = apfsTextRange()
    guard let strOff = buffer.findString("com.apple.apfs.get-dev-by-role") else {
        log(" [-] entitlement string not found")
        return false
    }
    let refs = findStringRefs(strOff, in: apfsRange)
    guard !refs.isEmpty else {
        log(" [-] no code refs to entitlement string")
        return false
    }
    for ref in refs {
        guard let funcStart = findFunctionStart(ref.adrpOff) else { continue }
        // 0x1200 bytes bounds the scan to the function's plausible size.
        let funcEnd = min(funcStart + 0x1200, buffer.count)
        // Collect every forward CBZ/CBNZ on w0/x0 whose target is an
        // entitlement-error block, then NOP them all in one pass.
        var candidates: [(off: Int, target: Int)] = []
        var scan = funcStart
        while scan + 4 <= funcEnd {
            guard let insn = disasm.disassembleOne(in: buffer.data, at: scan),
                  insn.mnemonic == "cbz" || insn.mnemonic == "cbnz",
                  let detail = insn.aarch64,
                  detail.operands.count >= 2
            else {
                scan += 4; continue
            }
            let ops = detail.operands
            guard ops[0].type == AARCH64_OP_REG,
                  ops[1].type == AARCH64_OP_IMM
            else {
                scan += 4; continue
            }
            let reg = ops[0].reg
            guard reg == AARCH64_REG_X0 || reg == AARCH64_REG_W0 else {
                scan += 4; continue
            }
            // NOTE(review): the immediate is treated as a file offset; this
            // holds because disassembly is addressed by file offset elsewhere
            // in this patcher — confirm the disasm base if that changes.
            let target = Int(ops[1].imm)
            // Only forward branches that stay inside the function qualify.
            guard target > scan, target >= funcStart, target < funcEnd else {
                scan += 4; continue
            }
            if isEntitlementErrorBlock(at: target, funcEnd: funcEnd) {
                if !candidates.contains(where: { $0.off == scan }) {
                    candidates.append((off: scan, target: target))
                }
            }
            scan += 4
        }
        if !candidates.isEmpty {
            for cand in candidates {
                let va = fileOffsetToVA(cand.off)
                emit(
                    cand.off,
                    ARM64.nop,
                    patchID: "kernel.handle_get_dev_by_role.gate_\(String(format: "%X", cand.off))",
                    virtualAddress: va,
                    description: "NOP [handle_get_dev_by_role entitlement gate -> 0x\(String(format: "%X", cand.target))]"
                )
            }
            return true
        }
    }
    log(" [-] handle_get_dev_by_role entitlement gate pattern not found")
    return false
}
/// Return true when the basic block at `targetOff` loads one of the known
/// entitlement-gate line IDs (0x332D or 0x333B) into w8 within its first
/// 0x30 bytes. A call, unconditional branch, or return ends the block early.
private func isEntitlementErrorBlock(at targetOff: Int, funcEnd: Int) -> Bool {
    let blockEnd = min(targetOff + 0x30, funcEnd)
    let terminators: Set<String> = ["bl", "b", "ret", "retab"]
    for off in stride(from: targetOff, through: blockEnd - 4, by: 4) {
        guard let insn = disasm.disassembleOne(in: buffer.data, at: off),
              let detail = insn.aarch64
        else { continue }
        // Leaving the block without seeing the marker means "not an error block".
        if terminators.contains(insn.mnemonic) { return false }
        guard insn.mnemonic == "mov", detail.operands.count >= 2 else { continue }
        let ops = detail.operands
        let destIsW8 = ops[0].type == AARCH64_OP_REG && ops[0].reg == AARCH64_REG_W8
        let immIsGateID = ops[1].type == AARCH64_OP_IMM
            && (ops[1].imm == 0x332D || ops[1].imm == 0x333B)
        if destIsW8, immIsGateID { return true }
    }
    return false
}
// MARK: - Aggregate entry point
/// Apply all APFS mount patches (13, 14, 15, 16). Every patch runs even if
/// an earlier one fails; true is returned only when all four succeed.
@discardableResult
func patchApfsMount() -> Bool {
    let outcomes = [
        patchApfsVfsopMountCmp(),
        patchApfsMountUpgradeChecks(),
        patchHandleFsiocGraft(),
        patchHandleGetDevByRoleEntitlement(),
    ]
    return outcomes.allSatisfy { $0 }
}
}

View File

@@ -0,0 +1,82 @@
// KernelPatchApfsSeal.swift APFS seal broken patch.
//
// Patch 2: NOP the conditional branch that leads into the
// "root volume seal is broken" panic path in _authapfs_seal_is_broken.
//
// Strategy (mirrors kernel_patch_apfs_seal.py):
// 1. Find string "root volume seal is broken" in the APFS kext text range.
// 2. For every ADRP+ADD xref, scan forward 0x40 bytes to find a
// BL to _panic (panicOffset).
// 3. From the ADRP offset, scan backward 0x200 bytes for a conditional
// branch whose target lands in [adrp_off - 0x40, bl_off + 4].
// 4. NOP that conditional branch.
import Capstone
import Foundation
extension KernelPatcher {
@discardableResult
func patchApfsSealBroken() -> Bool {
    log("\n[2] _authapfs_seal_is_broken: seal broken panic")
    // Step 1: anchor string inside the APFS kext.
    guard let strOff = buffer.findString("root volume seal is broken") else {
        log(" [-] string 'root volume seal is broken' not found")
        return false
    }
    let apfsRange = apfsTextRange()
    // Step 2: ADRP+ADD code references to the string.
    let refs = findStringRefs(in: apfsRange, stringOffset: strOff)
    if refs.isEmpty {
        log(" [-] no ADRP+ADD refs to 'root volume seal is broken'")
        return false
    }
    // _panic must already be resolved (see findPanicOffsetIfNeeded).
    guard let panicOff = panicOffset else {
        log(" [-] _panic offset not resolved")
        return false
    }
    for (adrpOff, addOff) in refs {
        // Step 3: find BL to _panic within 0x40 bytes after the ADD.
        var blOff: Int? = nil
        let blScanEnd = min(addOff + 0x40, buffer.count - 4)
        var scan = addOff
        while scan <= blScanEnd {
            if isBL(at: scan, target: panicOff) {
                blOff = scan
                break
            }
            scan += 4
        }
        guard let confirmedBlOff = blOff else { continue }
        // Step 4: search backward from just before ADRP for a conditional branch
        // whose target falls in [adrp_off - 0x40, bl_off + 4].
        // The 0x40 slack below the ADRP tolerates a few setup instructions
        // between the gate's branch target and the string materialisation.
        let errLo = adrpOff - 0x40
        let errHi = confirmedBlOff + 4
        let backLimit = max(adrpOff - 0x200, 0)
        var back = adrpOff - 4
        while back >= backLimit {
            guard let insn = disasm.disassembleOne(in: buffer.data, at: back) else {
                back -= 4
                continue
            }
            if let branchTarget = conditionalBranchTarget(insn: insn) {
                if branchTarget >= errLo, branchTarget <= errHi {
                    // Step 5: NOP the gate, making the panic path unreachable.
                    let desc = "NOP \(insn.mnemonic) (seal broken) [_authapfs_seal_is_broken]"
                    emit(back, ARM64.nop, patchID: "kernel.apfs_seal_broken", description: desc)
                    return true
                }
            }
            back -= 4
        }
    }
    log(" [-] conditional branch to seal-broken panic not found")
    return false
}
}

View File

@@ -0,0 +1,69 @@
// KernelPatchApfsSnapshot.swift APFS root snapshot patch.
//
// Patch 1: NOP the tbnz/tbz w<reg>, #5 instruction that gates the
// sealed-volume root snapshot panic in _apfs_vfsop_mount.
//
// Strategy (mirrors kernel_patch_apfs_snapshot.py):
// 1. Find string "Rooting from snapshot with xid" (fallback: "Failed to find
// the root snapshot") in the APFS kext text range.
// 2. For every ADRP+ADD xref, scan forward 0x200 bytes for a
// tbz/tbnz <reg>, #5, <target> instruction.
// 3. NOP it.
import Capstone
import Foundation
extension KernelPatcher {
/// Patch 1: NOP the tbz/tbnz <reg>, #5 that gates the sealed-volume root
/// snapshot check in _apfs_vfsop_mount. The instruction is located by
/// scanning forward from an ADRP+ADD reference to one of two anchor strings.
@discardableResult
func patchApfsRootSnapshot() -> Bool {
    log("\n[1] _apfs_vfsop_mount: root snapshot sealed volume check")
    let apfsRange = apfsTextRange()
    // Primary anchor first; fall back to the secondary string if absent.
    var xrefs = findStringRefs(in: apfsRange, string: "Rooting from snapshot with xid")
    if xrefs.isEmpty {
        xrefs = findStringRefs(in: apfsRange, string: "Failed to find the root snapshot")
    }
    guard !xrefs.isEmpty else {
        log(" [-] anchor strings not found in APFS text range")
        return false
    }
    for (_, addOff) in xrefs {
        // Look at most 0x200 bytes beyond the ADD for the test-bit branch.
        let last = min(addOff + 0x200, buffer.count - 4)
        for off in stride(from: addOff, through: last, by: 4) {
            // Match: tbz/tbnz <reg>, #5, <target>
            // Operands: [0] register, [1] bit number (IMM), [2] branch target.
            guard
                let insn = disasm.disassembleOne(in: buffer.data, at: off),
                insn.mnemonic == "tbnz" || insn.mnemonic == "tbz",
                let detail = insn.aarch64,
                detail.operands.count >= 2,
                detail.operands[0].type == AARCH64_OP_REG,
                detail.operands[1].type == AARCH64_OP_IMM,
                detail.operands[1].imm == 5
            else { continue }
            let desc = "NOP \(insn.mnemonic) \(insn.operandString) (sealed vol check) [_apfs_vfsop_mount]"
            emit(off, ARM64.nop, patchID: "kernel.apfs_root_snapshot", description: desc)
            return true
        }
    }
    log(" [-] tbz/tbnz <reg>, #5 not found near xref")
    return false
}
}

View File

@@ -0,0 +1,166 @@
// KernelPatchBsdInit.swift BSD init rootvp patch.
//
// Patch 3: NOP the conditional branch guarding the "rootvp not authenticated" panic.
// Python source: scripts/patchers/kernel_patch_bsd_init.py
//
// Allowed reveal flow (per CLAUDE.md guardrails):
// recover bsd_init locate rootvp panic block find the unique in-function BL
// cbnz w0/x0 panic bl imageboot_needed site patch the branch gate only.
import Capstone
import Foundation
// MARK: - Conditional branch mnemonics (ARM64)
// Every Capstone mnemonic treated as a "conditional branch" when scanning
// backward for a panic-gate branch: all B.cond condition codes (b.al is
// included for completeness even though it always branches) plus the
// compare-and-branch and test-bit-and-branch forms.
private let condBranchMnemonics: Set<String> = [
    "b.eq", "b.ne", "b.cs", "b.hs", "b.cc", "b.lo",
    "b.mi", "b.pl", "b.vs", "b.vc", "b.hi", "b.ls",
    "b.ge", "b.lt", "b.gt", "b.le", "b.al",
    "cbz", "cbnz", "tbz", "tbnz",
]
extension KernelPatcher {
// MARK: - Panic Offset Resolution
/// Resolve the file offset of _panic (once) by taking the most-called BL
/// target from the prebuilt BL index.
///
/// _panic dominates the BL histogram in a kernelcache; the 100-caller floor
/// guards against misfiring on degenerate inputs.
func findPanicOffsetIfNeeded() {
    if panicOffset != nil { return }
    let mostCalled = blIndex.max { lhs, rhs in lhs.value.count < rhs.value.count }
    guard let entry = mostCalled, entry.value.count >= 100 else {
        log(" [!] _panic not found in BL index")
        return
    }
    panicOffset = entry.key
    log(" [*] _panic at foff 0x\(String(format: "%X", entry.key)) (\(entry.value.count) callers)")
}
// MARK: - Patch 3: rootvp not authenticated
/// NOP the conditional branch guarding the "rootvp not authenticated after mounting" panic.
///
/// Flow:
/// 1. Find the string "rootvp not authenticated after mounting".
/// 2. Find ADRP+ADD code references to that string.
/// 3. Forward-scan from the ADD for a BL _panic (within 0x40 bytes).
/// 4. Backward-scan from the ADRP for a conditional branch into the panic block.
/// 5. NOP that conditional branch.
///
/// - Returns: true when the gate branch was NOPped; false on any miss.
@discardableResult
func patchBsdInitRootvp() -> Bool {
    log("\n[3] _bsd_init: rootvp not authenticated panic")
    // _panic's offset is required to recognise the panic call site.
    findPanicOffsetIfNeeded()
    guard let panicOff = panicOffset else {
        log(" [-] _panic offset unknown, cannot patch")
        return false
    }
    // Step 1: locate the anchor string.
    guard let strOff = buffer.findString("rootvp not authenticated after mounting") else {
        log(" [-] string not found")
        return false
    }
    // Step 2: find ADRP+ADD references in code.
    let refs = findStringRefs(strOff)
    if refs.isEmpty {
        log(" [-] no code refs in kernel __text")
        return false
    }
    for (adrpOff, addOff) in refs {
        // Step 3: scan forward from the ADD for BL _panic (up to 0x40 bytes).
        let fwdLimit = min(addOff + 0x40, buffer.count - 4)
        var blPanicOff: Int? = nil
        var scan = addOff
        while scan <= fwdLimit {
            let insn = buffer.readU32(at: scan)
            // BL: top 6 bits = 0b100101
            if insn >> 26 == 0b100101 {
                let imm26 = insn & 0x03FF_FFFF
                // Sign-extend the 26-bit word displacement via a shift pair.
                let signedImm = Int32(bitPattern: imm26 << 6) >> 6
                let target = scan + Int(signedImm) * 4
                if target == panicOff {
                    blPanicOff = scan
                    break
                }
            }
            scan += 4
        }
        guard let blPanic = blPanicOff else { continue }
        // Step 4: search backward from adrpOff for a conditional branch whose
        // target lands in the error block [blPanic - 0x40, blPanic + 4).
        let errLo = blPanic - 0x40
        let errHi = blPanic + 4
        let backLimit = max(adrpOff - 0x400, 0)
        var back = adrpOff - 4
        while back >= backLimit {
            // defer keeps the decrement on every path, including `continue`.
            defer { back -= 4 }
            guard back + 4 <= buffer.count else { continue }
            // Use Capstone to decode and check for conditional branch.
            // Addresses in Capstone match file offsets (address param == offset).
            guard let insn = disasm.disassembleOne(in: buffer.original, at: back) else {
                continue
            }
            guard condBranchMnemonics.contains(insn.mnemonic) else { continue }
            // Extract the branch target from operand string.
            // Capstone renders CBZ/CBNZ as "cbz x0, #0x1234" and
            // B.cond as "b.eq #0x1234". The immediate is the last token.
            guard let branchTarget = decodeBranchTargetFromInsn(insn) else { continue }
            // Target must fall within the error path block.
            guard branchTarget >= errLo, branchTarget < errHi else { continue }
            // Step 5: found the gate branch NOP it.
            let va = fileOffsetToVA(back)
            emit(
                back,
                ARM64.nop,
                patchID: "kernel.bsd_init_rootvp",
                virtualAddress: va,
                description: "NOP \(insn.mnemonic) (rootvp auth) [_bsd_init]"
            )
            return true
        }
    }
    log(" [-] conditional branch into panic path not found")
    return false
}
// MARK: - Helpers
/// Extract the branch target (file offset) from a Capstone instruction's operand string.
///
/// Capstone renders the target as the last comma-separated operand, e.g.
///   "w0, #0xf7798c"        (cbz / cbnz)
///   "#0xf7798c"            (b.eq / b.ne / ...)
///   "w0, #4, #0xf7798c"    (tbz / tbnz)
/// The literal may be hex ("0x...") or decimal. Defensively, a leading minus
/// sign (e.g. "#-0x10") is now accepted — the previous parser returned nil
/// for signed immediates because "-0x10" matched neither the "0x" prefix
/// path nor plain decimal `Int` parsing.
///
/// - Parameter insn: the decoded instruction whose last operand is the target.
/// - Returns: the target as an Int, or nil when the operand is not numeric.
private func decodeBranchTargetFromInsn(_ insn: Instruction) -> Int? {
    let tokens = insn.operandString
        .split(separator: ",")
        .map { String($0).trimmingCharacters(in: .whitespaces) }
    guard var literal = tokens.last else { return nil }
    if literal.hasPrefix("#") { literal.removeFirst() }
    // Accept an optional sign ahead of the radix prefix.
    var negative = false
    if literal.hasPrefix("-") {
        negative = true
        literal.removeFirst()
    }
    let magnitude: Int?
    if literal.hasPrefix("0x") || literal.hasPrefix("0X") {
        magnitude = Int(literal.dropFirst(2), radix: 16)
    } else {
        magnitude = Int(literal)
    }
    guard let value = magnitude else { return nil }
    return negative ? -value : value
}
}

View File

@@ -0,0 +1,172 @@
// KernelPatchDebugger.swift Debugger enablement patch (2 patches).
//
// Stubs _PE_i_can_has_debugger with: mov x0, #1; ret
// so the kernel always reports debugger enabled.
//
// Python source: scripts/patchers/kernel_patch_debugger.py
import Foundation
extension KernelPatcher {
/// Patches 6-7: stub _PE_i_can_has_debugger with mov x0,#1; ret.
///
/// Resolution strategies, tried in order:
/// 1. LC_SYMTAB symbol lookup.
/// 2. BL-histogram candidates filtered by the ADRP-x8 + LDR-from-x8 signature.
/// 3. Full linear scan of every code range with the same signature.
@discardableResult
func patchDebugger() -> Bool {
    log("\n[6-7] _PE_i_can_has_debugger: stub with mov x0,#1; ret")
    // Strategy 1: symbol table.
    if let va = MachOParser.findSymbol(containing: "PE_i_can_has_debugger", in: buffer.data),
       let funcOff = vaToFileOffset(va),
       funcOff + 4 < buffer.count,
       case let first = buffer.readU32(at: funcOff),
       first != 0, first != 0xD503_201F // not zero, not NOP
    {
        log(" [+] symbol table match at 0x\(String(format: "%X", funcOff))")
        emitDebugger(at: funcOff)
        return true
    }
    // Strategy 2: histogram of BL targets.
    log(" [*] trying code pattern search...")
    let (histOff, histCallers) = findDebuggerByBLHistogram()
    guard histOff < 0 else {
        log(" [+] code pattern match at 0x\(String(format: "%X", histOff)) (\(histCallers) callers)")
        emitDebugger(at: histOff)
        return true
    }
    // Strategy 3: exhaustive sweep.
    log(" [*] trying full scan fallback...")
    let (sweepOff, sweepCallers) = findDebuggerByFullScan()
    guard sweepOff < 0 else {
        log(" [+] fallback match at 0x\(String(format: "%X", sweepOff)) (\(sweepCallers) callers)")
        emitDebugger(at: sweepOff)
        return true
    }
    log(" [-] _PE_i_can_has_debugger not found")
    return false
}
// MARK: - Private Helpers
/// Write the two-instruction stub (mov x0,#1 ; ret) at the function entry.
private func emitDebugger(at offset: Int) {
    let entryVA = fileOffsetToVA(offset)
    emit(offset,
         ARM64.movX0_1,
         patchID: "kernel.debugger.mov_x0_1",
         virtualAddress: entryVA,
         description: "mov x0,#1 [_PE_i_can_has_debugger]")
    emit(offset + 4,
         ARM64.ret,
         patchID: "kernel.debugger.ret",
         virtualAddress: entryVA.map { $0 + 4 },
         description: "ret [_PE_i_can_has_debugger]")
}
/// Return true when the raw 32-bit word encodes ADRP with destination x8.
///
/// ADRP: bit [31] = 1, bits [28:24] = 10000; Rd sits in bits [4:0].
private func isADRPx8(_ insn: UInt32) -> Bool {
    let opcodeMatches = (insn & 0x9F00_0000) == 0x9000_0000
    let destinationIsX8 = (insn & 0x1F) == 8
    return opcodeMatches && destinationIsX8
}
/// Return true if the 32-bit value is a recognised function-boundary instruction.
/// Membership is defined by ARM64.funcBoundaryU32s; per the call sites in this
/// file that set covers ret / retaa / retab / pacibsp.
private func isFuncBoundary(_ insn: UInt32) -> Bool {
    ARM64.funcBoundaryU32s.contains(insn)
}
/// Prologue heuristic: does any of the `maxInsns` instructions following
/// `funcOff` load a W register from [x8, #imm]? That LDR is the canonical
/// _PE_i_can_has_debugger body (reads a global flag via the x8 page base).
private func hasWLdrFromX8(at funcOff: Int, maxInsns: Int = 8) -> Bool {
    for step in 1 ... maxInsns {
        let off = funcOff + step * 4
        guard off + 4 <= buffer.count else { return false }
        let word = buffer.readU32(at: off)
        // LDR Wt, [Xn, #imm12] (unsigned offset): word & 0xFFC00000 == 0xB9400000,
        // with the base register Rn in bits [9:5].
        if word & 0xFFC0_0000 == 0xB940_0000, (word >> 5) & 0x1F == 8 {
            return true
        }
    }
    return false
}
/// Strategy 2: among BL targets with 50-250 callers, pick the best-called one
/// matching the ADRP-x8 + LDR-from-x8 signature. Returns (-1, 0) on miss.
private func findDebuggerByBLHistogram() -> (Int, Int) {
    var best = (-1, 0)
    for (targetOff, callers) in blIndex {
        let callerCount = callers.count
        guard (50 ... 250).contains(callerCount),
              isInCodeRange(targetOff),
              targetOff + 4 < buffer.count,
              targetOff % 4 == 0,
              isADRPx8(buffer.readU32(at: targetOff))
        else { continue }
        // The word just before the entry must terminate the previous function
        // (ret / retaa / retab / pacibsp).
        if targetOff >= 4, !isFuncBoundary(buffer.readU32(at: targetOff - 4)) {
            continue
        }
        guard hasWLdrFromX8(at: targetOff), callerCount > best.1 else { continue }
        best = (targetOff, callerCount)
    }
    return best
}
/// Strategy 3: linear sweep of every code range using the same signature
/// heuristics, filtered afterwards by BL-caller count. Returns (-1, 0) on miss.
private func findDebuggerByFullScan() -> (Int, Int) {
    var best = (-1, 0)
    for (rangeStart, rangeEnd) in codeRanges {
        for off in stride(from: rangeStart, through: rangeEnd - 12, by: 4) {
            guard isADRPx8(buffer.readU32(at: off)) else { continue }
            // Require a function boundary immediately before the candidate.
            if off >= 4, !isFuncBoundary(buffer.readU32(at: off - 4)) { continue }
            guard hasWLdrFromX8(at: off) else { continue }
            let callerCount = blIndex[off]?.count ?? 0
            guard (50 ... 250).contains(callerCount), callerCount > best.1 else { continue }
            best = (off, callerCount)
        }
    }
    return best
}
/// Return true when `offset` lies inside any known [start, end) code range.
private func isInCodeRange(_ offset: Int) -> Bool {
    for range in codeRanges where offset >= range.start && offset < range.end {
        return true
    }
    return false
}
}

View File

@@ -0,0 +1,129 @@
// KernelPatchDyldPolicy.swift DYLD policy patches (2 patches).
//
// Replaces two BL calls in _check_dyld_policy_internal with mov w0,#1.
// The function is located via its reference to the Swift Playgrounds
// entitlement string. The two BLs that immediately precede the string
// reference (each followed by a conditional branch on w0) are patched.
//
// Python source: scripts/patchers/kernel_patch_dyld_policy.py
import Foundation
extension KernelPatcher {
/// Patches 1011: Replace two BL calls in _check_dyld_policy_internal with mov w0,#1.
///
/// The function is located via its reference to the Swift Playgrounds
/// entitlement string; the two BLs immediately preceding that reference —
/// each followed by a conditional branch on w0/x0 — are the policy checks
/// being forced to report success.
///
/// - Returns: true when both BLs were rewritten; false otherwise.
@discardableResult
func patchDyldPolicy() -> Bool {
    log("\n[10-11] _check_dyld_policy_internal: mov w0,#1 (two BLs)")
    // Anchor: entitlement string referenced from within the function.
    guard let strOff = buffer.findString(
        "com.apple.developer.swift-playgrounds-app.development-build"
    ) else {
        log(" [-] swift-playgrounds entitlement string not found")
        return false
    }
    let refs = findStringRefs(strOff)
    guard !refs.isEmpty else {
        log(" [-] no code refs found for swift-playgrounds entitlement string")
        return false
    }
    for (adrpOff, _) in refs {
        // Walk backward from the ADRP (exclusive), up to 80 bytes back,
        // collecting (bl_offset, bl_target) pairs where the instruction
        // immediately following the BL is a conditional branch on w0/x0.
        var blsWithCond: [(blOff: Int, blTarget: Int)] = []
        let scanStart = max(adrpOff - 80, 0)
        // Iterate in 4-byte steps from adrpOff-4 down to scanStart
        var back = adrpOff - 4
        while back >= scanStart {
            defer { back -= 4 }
            guard back >= 0, back + 4 <= buffer.count else { continue }
            if let target = decodeBL(at: back),
               isCondBranchOnW0(at: back + 4)
            {
                blsWithCond.append((blOff: back, blTarget: target))
            }
        }
        guard blsWithCond.count >= 2 else { continue }
        // blsWithCond[0] is closest to ADRP (@2), [1] is farther (@1).
        // The two BLs must call DIFFERENT functions to distinguish
        // _check_dyld_policy_internal from functions that repeat a single helper.
        let bl2 = blsWithCond[0] // closer to ADRP @2
        let bl1 = blsWithCond[1] // farther from ADRP @1
        guard bl1.blTarget != bl2.blTarget else { continue }
        let va1 = fileOffsetToVA(bl1.blOff)
        let va2 = fileOffsetToVA(bl2.blOff)
        // Overwriting each BL with mov w0,#1 makes both checks report success.
        emit(
            bl1.blOff,
            ARM64.movW0_1,
            patchID: "dyld_policy_1",
            virtualAddress: va1,
            description: "mov w0,#1 (was BL) [_check_dyld_policy_internal @1]"
        )
        emit(
            bl2.blOff,
            ARM64.movW0_1,
            patchID: "dyld_policy_2",
            virtualAddress: va2,
            description: "mov w0,#1 (was BL) [_check_dyld_policy_internal @2]"
        )
        return true
    }
    log(" [-] _check_dyld_policy_internal BL pair not found")
    return false
}
// MARK: - Private Helpers
/// Decode a BL at `offset`, returning its absolute file-offset target, or nil
/// when the word is not a BL or the read would fall outside the buffer.
///
/// BL: bits [31:26] = 0b100101; imm26 counts 4-byte words relative to the
/// instruction's own location.
private func decodeBL(at offset: Int) -> Int? {
    guard offset >= 0, offset + 4 <= buffer.count else { return nil }
    let word = buffer.readU32(at: offset)
    guard word >> 26 == 0b100101 else { return nil }
    // Park the 26-bit immediate in the top bits, then arithmetic-shift back
    // down to sign-extend it.
    let displacement = Int32(bitPattern: (word & 0x03FF_FFFF) << 6) >> 6
    return offset + Int(displacement) * 4
}
/// Return true when the instruction at `offset` is a conditional branch that
/// tests register 0 — CBZ/CBNZ/TBZ/TBNZ on w0 or x0.
private func isCondBranchOnW0(at offset: Int) -> Bool {
    guard offset >= 0, offset + 4 <= buffer.count else { return false }
    let word = buffer.readU32(at: offset)
    let opByte = (word >> 24) & 0xFF
    // CBZ/CBNZ: 0x34/0x35 with the sf (size) bit optionally set (0xB4/0xB5).
    // TBZ/TBNZ: 0x36/0x37, where bit 31 carries b5 of the tested bit (0xB6/0xB7).
    let compareAndBranch: Set<UInt32> = [0x34, 0x35, 0xB4, 0xB5]
    let testBitAndBranch: Set<UInt32> = [0x36, 0x37, 0xB6, 0xB7]
    guard compareAndBranch.contains(opByte) || testBitAndBranch.contains(opByte) else {
        // B.cond (0x54) names no register, so it is deliberately excluded —
        // the Python reference only matched cbz/cbnz/tbz/tbnz on w0.
        return false
    }
    return word & 0x1F == 0 // Rt field == 0 → w0 / x0
}
}

View File

@@ -0,0 +1,72 @@
// KernelPatchLaunchConstraints.swift Launch constraints patches (patches 45).
//
// Stubs _proc_check_launch_constraints with `mov w0, #0; ret` so that
// the AMFI launch-constraint gate always returns 0 (success).
//
// The kernel wrapper for _proc_check_launch_constraints does not embed the
// symbol name string directly. Instead the underlying AMFI function references
// "AMFI: Validation Category info", which is used as the anchor.
//
// Strategy (mirrors Python kernel_patch_launch_constraints.py):
// 1. Find the "AMFI: Validation Category info" string in the binary.
// 2. Find ADRP+ADD references into code.
// 3. Walk backward from each ADRP to locate the enclosing function start.
// 4. Emit `mov w0, #0` at funcStart and `ret` at funcStart+4.
//
// Python source: scripts/patchers/kernel_patch_launch_constraints.py
import Foundation
extension KernelPatcher {
// MARK: - Patches 45: _proc_check_launch_constraints
/// Patches 45: Stub _proc_check_launch_constraints with `mov w0, #0; ret`.
///
/// The wrapper itself embeds no symbol string, so the "AMFI: Validation
/// Category info" string referenced by the underlying AMFI routine serves as
/// the anchor; the enclosing function's entry point is stubbed.
///
/// Returns true when both instructions were emitted, false on any failure.
@discardableResult
func patchLaunchConstraints() -> Bool {
    log("\n[4-5] _proc_check_launch_constraints: stub with mov w0,#0; ret")
    // Anchor string used inside the AMFI function.
    guard let strOff = buffer.findString("AMFI: Validation Category info") else {
        log(" [-] 'AMFI: Validation Category info' string not found")
        return false
    }
    // ADRP+ADD references from the AMFI text range into the string.
    let amfiRange = amfiTextRange()
    let refs = findStringRefs(strOff, in: amfiRange)
    guard !refs.isEmpty else {
        log(" [-] no code refs to 'AMFI: Validation Category info'")
        return false
    }
    // Stub the first reference whose enclosing function start resolves.
    for (adrpOff, _) in refs {
        guard let entry = findFunctionStart(adrpOff) else { continue }
        emit(
            entry,
            ARM64.movW0_0,
            patchID: "launch_constraints_mov",
            virtualAddress: fileOffsetToVA(entry),
            description: "mov w0,#0 [_proc_check_launch_constraints]"
        )
        emit(
            entry + 4,
            ARM64.ret,
            patchID: "launch_constraints_ret",
            virtualAddress: fileOffsetToVA(entry + 4),
            description: "ret [_proc_check_launch_constraints]"
        )
        return true
    }
    log(" [-] function start not found for any ref to 'AMFI: Validation Category info'")
    return false
}
}

View File

@@ -0,0 +1,183 @@
// KernelPatchPostValidation.swift Post-validation patches (NOP + CMP).
//
// Python source: scripts/patchers/kernel_patch_post_validation.py
//
// Patch 8 patchPostValidationNOP:
// Anchor: "TXM [Error]: CodeSignature" string ADRP+ADD ref scan forward
// for TBNZ NOP it.
//
// Patch 9 -- patchPostValidationCMP:
//   Anchor: "AMFI: code signature validation failed" -> caller function ->
//   BL targets in code range -> callee with `cmp w0,#imm ; b.ne` preceded by a BL
//   -> replace the CMP with `cmp w0,w0`.
import Capstone
import Foundation
extension KernelPatcher {
    // MARK: - Patch 8: NOP TBNZ after TXM CodeSignature error log

    /// NOP the TBNZ that follows the TXM CodeSignature error log call.
    ///
    /// The 'TXM [Error]: CodeSignature: selector: ...' string is followed by a BL
    /// (printf/log), then a TBNZ that branches to an additional validation path.
    /// NOPping the TBNZ skips that extra check.
    ///
    /// - Returns: true when a TBNZ was found near a string ref and NOPped.
    @discardableResult
    func patchPostValidationNOP() -> Bool {
        log("\n[8] post-validation NOP (txm-related)")
        guard let strOff = buffer.findString("TXM [Error]: CodeSignature") else {
            log(" [-] 'TXM [Error]: CodeSignature' string not found")
            return false
        }
        let refs = findStringRefs(strOff)
        guard !refs.isEmpty else {
            log(" [-] no code refs")
            return false
        }
        for (_, addOff) in refs {
            // Scan forward up to 0x40 bytes past the ADD for a TBNZ instruction.
            // Upper bound is clamped so the 4-byte decode at `scan` stays in-buffer.
            let scanEnd = min(addOff + 0x40, buffer.count - 4)
            for scan in stride(from: addOff, through: scanEnd, by: 4) {
                let insns = disasm.disassemble(in: buffer.data, at: scan, count: 1)
                guard let insn = insns.first else { continue }
                guard insn.mnemonic == "tbnz" else { continue }
                let va = fileOffsetToVA(scan)
                emit(scan, ARM64.nop,
                     patchID: "kernel.post_validation.nop_tbnz",
                     virtualAddress: va,
                     description: "NOP \(insn.mnemonic) \(insn.operandString) [txm post-validation]")
                // First matching TBNZ wins; exactly one patch site is emitted.
                return true
            }
        }
        log(" [-] TBNZ not found after TXM error string ref")
        return false
    }

    // MARK: - Patch 9: cmp w0,w0 in postValidation (AMFI code signing)

    /// Replace `cmp w0, #imm` with `cmp w0, w0` in AMFI's postValidation path.
    ///
    /// The 'AMFI: code signature validation failed' string is in a caller function,
    /// not in postValidation itself. We find the caller, collect its BL targets,
    /// then look inside each target for `cmp w0, #imm ; b.ne` preceded by a BL.
    ///
    /// - Returns: true only when exactly one candidate compare site is found;
    ///   zero or multiple hits are rejected as a signature mismatch.
    @discardableResult
    func patchPostValidationCMP() -> Bool {
        log("\n[9] postValidation: cmp w0,w0 (AMFI code signing)")
        guard let strOff = buffer.findString("AMFI: code signature validation failed") else {
            log(" [-] string not found")
            return false
        }
        let refs = findStringRefs(strOff)
        guard !refs.isEmpty else {
            log(" [-] no code refs")
            return false
        }
        // Collect unique caller function starts.
        var seenFuncs = Set<Int>()
        var hits: [Int] = []
        for (adrpOff, _) in refs {
            guard let callerStart = findFunctionStart(adrpOff),
                  !seenFuncs.contains(callerStart) else { continue }
            seenFuncs.insert(callerStart)
            // Find caller end: scan forward for next PACIBSP (= next function boundary).
            let callerEnd = nextFunctionBoundary(after: callerStart, maxSize: 0x2000)
            // Collect BL targets by direct instruction decode across the caller body.
            var blTargets = Set<Int>()
            for scan in stride(from: callerStart, to: callerEnd, by: 4) {
                // Defensive stop at a PACIBSP in case the boundary scan overshot.
                if scan > callerStart, buffer.readU32(at: scan) == ARM64.pacibspU32 { break }
                if let target = decodeBLOffset(at: scan) {
                    blTargets.insert(target)
                }
            }
            // For each BL target within our code range, look for cmp w0,#imm ; b.ne
            // preceded by a BL within 2 instructions.
            for target in blTargets.sorted() {
                guard isWithinCodeRange(target) else { continue }
                let calleeEnd = nextFunctionBoundary(after: target, maxSize: 0x200)
                // `calleeEnd - 4` leaves room for the 2-instruction decode below.
                for off in stride(from: target, to: calleeEnd - 4, by: 4) {
                    // Stop at next function boundary.
                    if off > target, buffer.readU32(at: off) == ARM64.pacibspU32 { break }
                    let insns = disasm.disassemble(in: buffer.data, at: off, count: 2)
                    guard insns.count >= 2 else { continue }
                    let i0 = insns[0], i1 = insns[1]
                    guard i0.mnemonic == "cmp", i1.mnemonic == "b.ne" else { continue }
                    // Require the compare to be `cmp w0, #imm` specifically.
                    guard let detail0 = i0.aarch64, detail0.operands.count >= 2 else { continue }
                    let op0 = detail0.operands[0]
                    let op1 = detail0.operands[1]
                    guard op0.type == AARCH64_OP_REG, op0.reg == AARCH64_REG_W0 else { continue }
                    guard op1.type == AARCH64_OP_IMM else { continue }
                    // Must be preceded by a BL within 2 instructions (4 or 8 bytes back).
                    var hasBlBefore = false
                    for back in stride(from: off - 4, through: max(off - 8, target), by: -4) {
                        if decodeBLOffset(at: back) != nil {
                            hasBlBefore = true
                            break
                        }
                    }
                    guard hasBlBefore else { continue }
                    hits.append(off)
                }
            }
        }
        // Exactly one site is expected; anything else means the heuristic
        // matched the wrong code and patching would be unsafe.
        let uniqueHits = Array(Set(hits)).sorted()
        guard uniqueHits.count == 1 else {
            log(" [-] expected 1 postValidation compare site, found \(uniqueHits.count)")
            return false
        }
        let patchOff = uniqueHits[0]
        emit(patchOff, ARM64.cmpW0W0,
             patchID: "kernel.post_validation.cmp_w0_w0",
             virtualAddress: fileOffsetToVA(patchOff),
             description: "cmp w0,w0 (was cmp w0,#imm) [postValidation]")
        return true
    }

    // MARK: - Private helpers

    /// Decode a BL instruction at `offset`. Returns the absolute file offset of the
    /// target, or nil if the instruction at that offset is not a BL.
    private func decodeBLOffset(at offset: Int) -> Int? {
        guard offset >= 0, offset + 4 <= buffer.count else { return nil }
        let insn = buffer.readU32(at: offset)
        // BL: [31:26] = 0b100101
        guard insn >> 26 == 0b100101 else { return nil }
        let imm26 = insn & 0x03FF_FFFF
        // Sign-extend the 26-bit immediate: shift it to bits [31:6], then an
        // arithmetic right shift by 6 propagates the sign bit.
        let signedImm = Int32(bitPattern: imm26 << 6) >> 6
        // Branch target is instruction-relative, in 4-byte units.
        return offset + Int(signedImm) * 4
    }

    /// Find the start offset of the next function after `start` (exclusive),
    /// up to `maxSize` bytes ahead. Returns `start + maxSize` if none found.
    ///
    /// A function boundary is identified by a PACIBSP prologue instruction.
    private func nextFunctionBoundary(after start: Int, maxSize: Int) -> Int {
        let limit = min(start + maxSize, buffer.count)
        for off in stride(from: start + 4, to: limit, by: 4) {
            if buffer.readU32(at: off) == ARM64.pacibspU32 {
                return off
            }
        }
        return limit
    }

    /// Return true if `offset` falls within any known code range.
    private func isWithinCodeRange(_ offset: Int) -> Bool {
        codeRanges.contains { offset >= $0.start && offset < $0.end }
    }
}

View File

@@ -0,0 +1,346 @@
// KernelPatchSandbox.swift Sandbox MACF hook patches (10 patches).
//
// Stubs 5 Sandbox hook functions with: mov x0,#0; ret
// so that sandbox policy operations always succeed.
//
// Python source: scripts/patchers/kernel_patch_sandbox.py
// Algorithm:
// 1. Find the Sandbox mac_policy_conf struct by locating the "Sandbox" and
// "Seatbelt sandbox policy" strings and scanning __DATA/__DATA_CONST for a
// pointer pair that references them. The ops pointer lives at offset +32.
// 2. Discover the Sandbox kext __text range via __PRELINK_INFO so that
// each function pointer can be validated before patching.
// 3. For each of the 5 hook indices in the mac_policy_ops table, decode the
// chained fixup pointer and emit: mov x0,#0; ret.
//
// Hook indices (XNU xnu-11215+ mac_policy_ops struct):
// file_check_mmap index 36
// mount_check_mount index 87
// mount_check_remount index 88
// mount_check_umount index 91
// vnode_check_rename index 120
import Foundation
extension KernelPatcher {
    // MARK: - Public Entry Point

    /// Patches 17-26: stub Sandbox MACF hooks with mov x0,#0; ret.
    ///
    /// Locates the Sandbox mac_policy_ops table via the mac_policy_conf struct,
    /// decodes each targeted hook's function pointer, optionally validates it
    /// against the Sandbox kext __text range, and overwrites the function
    /// prologue with a two-instruction success stub.
    ///
    /// - Returns: true if at least one hook was patched.
    @discardableResult
    func patchSandbox() -> Bool {
        log("\n[17-26] Sandbox MACF hooks")
        guard let opsTableOff = findSandboxOpsTable() else {
            return false
        }
        // Optional: when nil, per-hook range validation below is skipped.
        let sandboxRange = discoverSandboxTextRange()
        // Hook slot indices into mac_policy_ops (XNU xnu-11215+ struct layout);
        // each slot is an 8-byte pointer.
        let hooks: [(name: String, index: Int)] = [
            ("file_check_mmap", 36),
            ("mount_check_mount", 87),
            ("mount_check_remount", 88),
            ("mount_check_umount", 91),
            ("vnode_check_rename", 120),
        ]
        var patchedCount = 0
        for hook in hooks {
            let entryOff = opsTableOff + hook.index * 8
            guard entryOff + 8 <= buffer.count else {
                log(" [-] ops[\(hook.index)] \(hook.name): offset out of bounds")
                continue
            }
            let raw = buffer.readU64(at: entryOff)
            let funcOff = decodeChainedPtr(raw)
            guard funcOff >= 0 else {
                log(" [-] ops[\(hook.index)] \(hook.name): NULL or invalid (raw=0x\(String(format: "%X", raw)))")
                continue
            }
            // If the Sandbox __text range is known, refuse to patch pointers
            // that fall outside it (guards against mis-decoded fixups).
            if let range = sandboxRange {
                guard funcOff >= range.start, funcOff < range.end else {
                    log(" [-] ops[\(hook.index)] \(hook.name): foff 0x\(String(format: "%X", funcOff)) outside Sandbox (0x\(String(format: "%X", range.start))-0x\(String(format: "%X", range.end)))")
                    continue
                }
            }
            let va = fileOffsetToVA(funcOff)
            emit(funcOff, ARM64.movX0_0,
                 patchID: "kernel.sandbox.\(hook.name).mov_x0_0",
                 virtualAddress: va,
                 description: "mov x0,#0 [_hook_\(hook.name)]")
            emit(funcOff + 4, ARM64.ret,
                 patchID: "kernel.sandbox.\(hook.name).ret",
                 virtualAddress: va.map { $0 + 4 },
                 description: "ret [_hook_\(hook.name)]")
            log(" [+] ops[\(hook.index)] \(hook.name) at foff 0x\(String(format: "%X", funcOff))")
            patchedCount += 1
        }
        return patchedCount > 0
    }

    // MARK: - mac_policy_conf / ops table discovery

    /// Find the Sandbox mac_policy_ops table via the mac_policy_conf struct.
    ///
    /// Strategy (aligned with Python _find_sandbox_ops_table_via_conf):
    /// - Locate the "Sandbox" C string (preceded by a NUL byte) and
    ///   "Seatbelt sandbox policy" C string in the binary.
    /// - Scan __DATA_CONST and __DATA segments for non-auth chained fixup
    ///   pointers where the low 43 bits match the string file offsets.
    /// - The mpc_ops pointer is at offset +32 from the start of the struct,
    ///   also decoded from the low 43 bits.
    ///
    /// - Returns: file offset of the ops table, or nil when not found.
    private func findSandboxOpsTable() -> Int? {
        log(" [*] Finding Sandbox mac_policy_ops via mac_policy_conf...")
        // Find "Sandbox\0" -- search for \0Sandbox\0 so we get the exact symbol string.
        guard let sandboxRawOff = findNulPrefixedString("Sandbox") else {
            log(" [-] Sandbox string not found")
            return nil
        }
        // Find "Seatbelt sandbox policy\0"
        guard let seatbeltOff = buffer.findString("Seatbelt sandbox policy") else {
            log(" [-] Seatbelt sandbox policy string not found")
            return nil
        }
        log(" [*] Sandbox string at foff 0x\(String(format: "%X", sandboxRawOff)), Seatbelt at 0x\(String(format: "%X", seatbeltOff))")
        // Scan data segments for the mac_policy_conf struct.
        // Python approach: skip auth pointers (bit63=1), match low 43 bits directly
        // against file offsets for non-auth chained fixup pointers.
        for seg in segments {
            guard seg.name == "__DATA_CONST" || seg.name == "__DATA" else { continue }
            // Struct fields read below extend 40 bytes past the candidate start.
            guard seg.fileSize > 40 else { continue }
            let segStart = Int(seg.fileOffset)
            let segEnd = segStart + Int(seg.fileSize)
            var i = segStart
            while i + 40 <= segEnd {
                let val0 = buffer.readU64(at: i)
                // Skip zero and auth pointers
                guard val0 != 0, val0 & (1 << 63) == 0 else {
                    i += 8
                    continue
                }
                // Check if low 43 bits match sandbox string offset
                guard Int(val0 & 0x7FF_FFFF_FFFF) == sandboxRawOff else {
                    i += 8
                    continue
                }
                // Next 8 bytes should point to "Seatbelt sandbox policy"
                let val1 = buffer.readU64(at: i + 8)
                guard val1 & (1 << 63) == 0,
                      Int(val1 & 0x7FF_FFFF_FFFF) == seatbeltOff
                else {
                    i += 8
                    continue
                }
                // mpc_ops is at offset +32, also decode low 43 bits
                let opsVal = buffer.readU64(at: i + 32)
                guard opsVal & (1 << 63) == 0 else {
                    i += 8
                    continue
                }
                let opsOff = Int(opsVal & 0x7FF_FFFF_FFFF)
                guard opsOff > 0, opsOff < buffer.count else {
                    i += 8
                    continue
                }
                log(" [+] mac_policy_conf at foff 0x\(String(format: "%X", i)), mpc_ops -> 0x\(String(format: "%X", opsOff))")
                return opsOff
            }
        }
        log(" [-] mac_policy_conf not found")
        return nil
    }

    // MARK: - Sandbox kext text range

    /// Discover the Sandbox kext __text range via __PRELINK_INFO.
    /// Returns nil if not found (patching will skip range validation).
    ///
    /// The __PRELINK_INFO segment embeds an XML plist whose
    /// `_PrelinkInfoDictionary` entries describe every prelinked kext.
    private func discoverSandboxTextRange() -> (start: Int, end: Int)? {
        // Find __PRELINK_INFO segment
        guard let prelinkSeg = segments.first(where: { $0.name == "__PRELINK_INFO" }),
              prelinkSeg.fileSize > 0
        else {
            return nil
        }
        let prelinkStart = Int(prelinkSeg.fileOffset)
        let prelinkEnd = prelinkStart + Int(prelinkSeg.fileSize)
        guard prelinkEnd <= buffer.count else { return nil }
        // NOTE: Data slices keep the parent's indices, so ranges found below
        // are absolute into buffer.data.
        let prelinkData = buffer.data[prelinkStart ..< prelinkEnd]
        // Find the XML plist within the segment
        guard let xmlStart = prelinkData.range(of: Data("<?xml".utf8)),
              let plistEnd = prelinkData.range(of: Data("</plist>".utf8))
        else {
            return nil
        }
        let xmlRange = xmlStart.lowerBound ..< (plistEnd.upperBound)
        let xmlData = prelinkData[xmlRange]
        guard let plist = try? PropertyListSerialization.propertyList(from: Data(xmlData), format: nil) as? [String: Any],
              let items = plist["_PrelinkInfoDictionary"] as? [[String: Any]]
        else {
            return nil
        }
        for item in items {
            guard let bid = item["CFBundleIdentifier"] as? String,
                  bid == "com.apple.security.sandbox"
            else {
                continue
            }
            // _PrelinkExecutableLoadAddr is the kext's load address
            // (plist integers may decode as UInt64 or as signed Int).
            guard let loadAddrRaw = item["_PrelinkExecutableLoadAddr"],
                  let loadAddrInt = (loadAddrRaw as? UInt64) ?? (loadAddrRaw as? Int).map({ UInt64(bitPattern: Int64($0)) })
            else {
                continue
            }
            let loadAddr = loadAddrInt & 0xFFFF_FFFF_FFFF_FFFF
            guard loadAddr > baseVA else { continue }
            let kextFoff = Int(loadAddr - baseVA)
            guard kextFoff >= 0, kextFoff < buffer.count else { continue }
            if let range = parseKextTextRange(at: kextFoff) {
                log(" [*] Sandbox __text: 0x\(String(format: "%X", range.start))-0x\(String(format: "%X", range.end))")
                return range
            }
        }
        return nil
    }

    /// Parse an embedded kext Mach-O at the given file offset and return its
    /// __TEXT_EXEC.__text (or __TEXT_EXEC segment) range in file offsets.
    ///
    /// Field offsets follow segment_command_64 / section_64 from mach-o/loader.h.
    /// NOTE(review): `load(fromByteOffset:)` requires naturally aligned offsets;
    /// well-formed Mach-O load commands satisfy this -- confirm for hostile input.
    private func parseKextTextRange(at kextFoff: Int) -> (start: Int, end: Int)? {
        guard kextFoff + 32 <= buffer.count else { return nil }
        let magic = buffer.readU32(at: kextFoff)
        guard magic == 0xFEED_FACF else { return nil } // MH_MAGIC_64
        // ncmds is at offset 16 in mach_header_64; load commands start at 32.
        let ncmds: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: kextFoff + 16, as: UInt32.self) }
        var off = kextFoff + 32
        for _ in 0 ..< ncmds {
            guard off + 8 <= buffer.count else { break }
            let cmd: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off, as: UInt32.self) }
            let cmdsize: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + 4, as: UInt32.self) }
            // Sanity-bound cmdsize to avoid runaway walks on corrupt headers.
            guard cmdsize >= 8, cmdsize < 0x10000 else { break }
            if cmd == 0x19 { // LC_SEGMENT_64
                let segNameData = buffer.data[off + 8 ..< min(off + 24, buffer.count)]
                let segName = String(data: segNameData, encoding: .utf8)?
                    .trimmingCharacters(in: CharacterSet(charactersIn: "\0")) ?? ""
                if segName == "__TEXT_EXEC" {
                    let vmAddr: UInt64 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + 24, as: UInt64.self) }
                    let fileSize: UInt64 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + 48, as: UInt64.self) }
                    let nsects: UInt32 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: off + 64, as: UInt32.self) }
                    // Search sections for __text (section_64 entries are 80 bytes).
                    var sectOff = off + 72
                    for _ in 0 ..< nsects {
                        guard sectOff + 80 <= buffer.count else { break }
                        let sectNameData = buffer.data[sectOff ..< min(sectOff + 16, buffer.count)]
                        let sectName = String(data: sectNameData, encoding: .utf8)?
                            .trimmingCharacters(in: CharacterSet(charactersIn: "\0")) ?? ""
                        if sectName == "__text" {
                            let sectAddr: UInt64 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: sectOff + 32, as: UInt64.self) }
                            let sectSize: UInt64 = buffer.data.withUnsafeBytes { $0.load(fromByteOffset: sectOff + 40, as: UInt64.self) }
                            guard sectAddr >= baseVA else { break }
                            let sectFoff = Int(sectAddr - baseVA)
                            return (sectFoff, sectFoff + Int(sectSize))
                        }
                        sectOff += 80
                    }
                    // Fallback: use the segment itself
                    guard vmAddr >= baseVA else { break }
                    let segFoff = Int(vmAddr - baseVA)
                    return (segFoff, segFoff + Int(fileSize))
                }
            }
            off += Int(cmdsize)
        }
        return nil
    }

    // MARK: - Pointer helpers

    /// Decode an arm64e chained fixup pointer to a file offset.
    ///
    /// - auth rebase (bit63 = 1): foff = bits[31:0]
    /// - non-auth rebase (bit63 = 0): VA = (bits[50:43] << 56) | bits[42:0]
    ///
    /// - Returns: decoded file offset, or -1 for NULL/unresolvable pointers.
    private func decodeChainedPtr(_ val: UInt64) -> Int {
        guard val != 0 else { return -1 }
        if val & (1 << 63) != 0 {
            // Authenticated rebase: lower 32 bits are file offset
            return Int(val & 0xFFFF_FFFF)
        } else {
            // Non-authenticated rebase: reconstruct VA
            let low43 = val & 0x7FF_FFFF_FFFF
            let high8 = (val >> 43) & 0xFF
            let fullVA = (high8 << 56) | low43
            guard fullVA > baseVA else { return -1 }
            return Int(fullVA - baseVA)
        }
    }

    /// Resolve a 64-bit data pointer to a file offset, trying both chained
    /// fixup decoding and a plain (VA - baseVA) conversion.
    private func resolvePointerToFileOffset(_ val: UInt64) -> Int? {
        guard val != 0 else { return nil }
        // Try chained fixup first
        let decoded = decodeChainedPtr(val)
        if decoded > 0, decoded < buffer.count {
            return decoded
        }
        // Try plain VA -> file offset
        if val > baseVA {
            let foff = Int(val - baseVA)
            if foff >= 0, foff < buffer.count {
                return foff
            }
        }
        return nil
    }

    /// Find a NUL-prefixed string (i.e. the exact C symbol "Sandbox" at a NUL boundary).
    /// Returns the file offset of the first character (after the NUL).
    ///
    /// The NUL prefix prevents matching the tail of a longer string such as
    /// "com.apple.Sandbox".
    private func findNulPrefixedString(_ string: String) -> Int? {
        guard let encoded = string.data(using: .utf8) else { return nil }
        var pattern = Data([0]) // NUL prefix
        pattern.append(contentsOf: encoded)
        pattern.append(0) // NUL terminator
        if let range = buffer.data.range(of: pattern) {
            return range.lowerBound + 1 // skip leading NUL
        }
        return nil
    }
}

View File

@@ -0,0 +1,403 @@
// FirmwareManifest.swift BuildManifest/Restore.plist generation.
//
// Translated from: scripts/fw_manifest.py
//
// Merges cloudOS boot-chain (vresearch101ap) with vphone600 runtime components
// (device tree, SEP, kernel) and iPhone OS images into a single DFU erase-install
// Build Identity.
import Foundation
// MARK: - Plist type aliases
/// Convenience alias for untyped plist dictionaries.
///
/// Values are whatever `PropertyListSerialization` produces: String, Int,
/// Bool, Data, Date, arrays, or nested `[String: Any]` dictionaries.
public typealias PlistDict = [String: Any]
// MARK: - FirmwareManifest
/// Generates hybrid BuildManifest and Restore plists for VM firmware.
///
/// The VM hardware identifies as vresearch101ap (BDID 0x90) in DFU mode, so the
/// identity fields must match for TSS/SHSH signing. Runtime components use the
/// vphone600 variant because its device tree sets MKB dt=1 (keybag-less boot).
public enum FirmwareManifest {
    // MARK: - Errors

    /// Errors thrown while loading, merging, or writing manifest plists.
    public enum ManifestError: Error, CustomStringConvertible {
        case fileNotFound(String)
        case invalidPlist(String)
        case identityNotFound(String)
        case missingKey(String)

        public var description: String {
            switch self {
            case let .fileNotFound(path):
                "Manifest file not found: \(path)"
            case let .invalidPlist(path):
                "Invalid plist: \(path)"
            case let .identityNotFound(msg):
                "Identity not found: \(msg)"
            case let .missingKey(key):
                "Missing required key: \(key)"
            }
        }
    }

    // MARK: - Identity indices

    /// Discovered identity indices from cloudOS and iPhone manifests.
    ///
    /// NOTE(review): not referenced by the code visible in this file --
    /// presumably consumed by callers or kept for parity with the Python
    /// source; confirm before removing.
    struct IdentityIndices {
        /// vresearch101ap release identity (boot chain matches DFU hardware).
        let prod: Int
        /// vresearch101ap research identity (research iBoot, TXM).
        let res: Int
        /// vphone600ap release identity (device tree, SEP, restore kernel).
        let vp: Int
        /// vphone600ap research identity (kernel cache).
        let vpr: Int
        /// iPhone erase identity (OS images).
        let iPhoneErase: Int
    }

    // MARK: - Public API

    /// Generate hybrid BuildManifest.plist and Restore.plist.
    ///
    /// Loads both source manifest pairs, discovers the needed build identities,
    /// merges them into a single DFU erase identity, and writes the results
    /// back into `iPhoneDir` (overwriting the iPhone originals).
    ///
    /// - Parameters:
    ///   - iPhoneDir: Path to the extracted iPhone IPSW directory.
    ///   - cloudOSDir: Path to the extracted cloudOS IPSW directory.
    ///   - verbose: Print progress messages.
    /// - Throws: `ManifestError` when a file is missing, unreadable, or lacks
    ///   a required key/identity.
    public static func generate(
        iPhoneDir: URL,
        cloudOSDir: URL,
        verbose: Bool = true
    ) throws {
        // Load source plists.
        let cloudOSBM = try loadPlist(cloudOSDir.appendingPathComponent("BuildManifest.plist"))
        let iPhoneBM = try loadPlist(iPhoneDir.appendingPathComponent("BuildManifest.plist"))
        let cloudOSRP = try loadPlist(cloudOSDir.appendingPathComponent("Restore.plist"))
        let iPhoneRP = try loadPlist(iPhoneDir.appendingPathComponent("Restore.plist"))
        guard let cloudIdentities = cloudOSBM["BuildIdentities"] as? [PlistDict] else {
            throw ManifestError.missingKey("BuildIdentities in cloudOS BuildManifest")
        }
        guard let iPhoneIdentities = iPhoneBM["BuildIdentities"] as? [PlistDict] else {
            throw ManifestError.missingKey("BuildIdentities in iPhone BuildManifest")
        }
        // Discover source identities.
        let (prod, res) = try findCloudOS(cloudIdentities, deviceClass: "vresearch101ap")
        let (vp, vpr) = try findCloudOS(cloudIdentities, deviceClass: "vphone600ap")
        let iErase = try findIPhoneErase(iPhoneIdentities)
        if verbose {
            print(" cloudOS vresearch101ap: release=#\(prod), research=#\(res)")
            print(" cloudOS vphone600ap: release=#\(vp), research=#\(vpr)")
            print(" iPhone erase: #\(iErase)")
        }
        // Build the single DFU erase identity.
        let buildIdentity = try buildEraseIdentity(
            cloudIdentities: cloudIdentities,
            iPhoneIdentities: iPhoneIdentities,
            prod: prod, res: res, vp: vp, vpr: vpr, iErase: iErase
        )
        // Assemble BuildManifest.
        // NOTE(review): `as Any` on a missing key wraps Optional.none, which
        // bridges to NSNull and makes writePlist throw -- acceptable only if
        // these keys are guaranteed present in every cloudOS manifest; confirm.
        let buildManifest: PlistDict = [
            "BuildIdentities": [buildIdentity],
            "ManifestVersion": cloudOSBM["ManifestVersion"] as Any,
            "ProductBuildVersion": cloudOSBM["ProductBuildVersion"] as Any,
            "ProductVersion": cloudOSBM["ProductVersion"] as Any,
            "SupportedProductTypes": ["iPhone99,11"],
        ]
        // Assemble Restore.plist.
        let restore = try buildRestorePlist(
            cloudOSRP: cloudOSRP,
            iPhoneRP: iPhoneRP
        )
        // Write output.
        try writePlist(buildManifest, to: iPhoneDir.appendingPathComponent("BuildManifest.plist"))
        if verbose { print(" wrote BuildManifest.plist") }
        try writePlist(restore, to: iPhoneDir.appendingPathComponent("Restore.plist"))
        if verbose { print(" wrote Restore.plist") }
    }

    // MARK: - Identity Discovery

    /// Determine whether a build identity is a research variant.
    ///
    /// Primary signal: firmware file names of the LLB/iBSS/iBEC components.
    /// A 4-dot-component name (e.g. `name.board.VARIANT.ext`) carries the
    /// variant in its third component. Falls back to the Info/Variant string.
    static func isResearch(_ bi: PlistDict) -> Bool {
        for comp in ["LLB", "iBSS", "iBEC"] {
            let path = (bi["Manifest"] as? PlistDict)?[comp]
                .flatMap { $0 as? PlistDict }?["Info"]
                .flatMap { $0 as? PlistDict }?["Path"] as? String ?? ""
            guard !path.isEmpty else { continue }
            let parts = (path as NSString).lastPathComponent.split(separator: ".")
            if parts.count == 4 {
                return parts[2].contains("RESEARCH")
            }
        }
        let variant = (bi["Info"] as? PlistDict)?["Variant"] as? String ?? ""
        return variant.lowercased().contains("research")
    }

    /// Find release and research identity indices for the given DeviceClass.
    ///
    /// - Returns: the first release and first research identity index.
    /// - Throws: `ManifestError.identityNotFound` when either is absent.
    static func findCloudOS(
        _ identities: [PlistDict],
        deviceClass: String
    ) throws -> (release: Int, research: Int) {
        var release: Int?
        var research: Int?
        for (i, bi) in identities.enumerated() {
            let dc = (bi["Info"] as? PlistDict)?["DeviceClass"] as? String ?? ""
            guard dc == deviceClass else { continue }
            if isResearch(bi) {
                if research == nil { research = i }
            } else {
                if release == nil { release = i }
            }
        }
        guard let rel = release else {
            throw ManifestError.identityNotFound("No release identity for DeviceClass=\(deviceClass)")
        }
        guard let res = research else {
            throw ManifestError.identityNotFound("No research identity for DeviceClass=\(deviceClass)")
        }
        return (rel, res)
    }

    /// Return the index of the first iPhone erase identity.
    ///
    /// "Erase" is identified by exclusion: the first identity whose Variant
    /// mentions none of research/upgrade/recovery.
    static func findIPhoneErase(_ identities: [PlistDict]) throws -> Int {
        for (i, bi) in identities.enumerated() {
            let variant = ((bi["Info"] as? PlistDict)?["Variant"] as? String ?? "").lowercased()
            if !variant.contains("research"),
               !variant.contains("upgrade"),
               !variant.contains("recovery")
            {
                return i
            }
        }
        throw ManifestError.identityNotFound("No erase identity found in iPhone manifest")
    }

    // MARK: - Build Identity Construction

    /// Deep-copy a single Manifest entry from a build identity.
    ///
    /// - Parameters:
    ///   - identities: source identity array.
    ///   - idx: identity index (caller guarantees it is in bounds).
    ///   - key: Manifest component key, e.g. "LLB".
    /// - Throws: `ManifestError.missingKey` when the entry is absent.
    static func entry(
        _ identities: [PlistDict],
        _ idx: Int,
        _ key: String
    ) throws -> PlistDict {
        guard let manifest = identities[idx]["Manifest"] as? PlistDict,
              let value = manifest[key] as? PlistDict
        else {
            throw ManifestError.missingKey("\(key) in identity #\(idx)")
        }
        return deepCopyPlistDict(value)
    }

    /// Build the single DFU erase identity by merging components from multiple sources.
    ///
    /// Internal names `C`/`I` abbreviate the cloudOS and iPhone identity arrays.
    static func buildEraseIdentity(
        cloudIdentities C: [PlistDict],
        iPhoneIdentities I: [PlistDict],
        prod: Int, res: Int, vp: Int, vpr: Int, iErase: Int
    ) throws -> PlistDict {
        // Identity base from vresearch101ap PROD.
        var bi = deepCopyPlistDict(C[prod])
        bi["Manifest"] = PlistDict()
        // Hardware identity fields must match the DFU device for TSS signing.
        bi["Ap,ProductType"] = "ComputeModule14,2"
        bi["Ap,Target"] = "VRESEARCH101AP"
        bi["Ap,TargetType"] = "vresearch101"
        bi["ApBoardID"] = "0x90"
        bi["ApChipID"] = "0xFE01"
        bi["ApSecurityDomain"] = "0x01"
        // Remove NeRDEpoch and RestoreAttestationMode from top-level and Info.
        for key in ["NeRDEpoch", "RestoreAttestationMode"] {
            bi.removeValue(forKey: key)
            if var info = bi["Info"] as? PlistDict {
                info.removeValue(forKey: key)
                bi["Info"] = info
            }
        }
        // Set Info fields.
        if var info = bi["Info"] as? PlistDict {
            info["FDRSupport"] = false
            info["Variant"] = "Darwin Cloud Customer Erase Install (IPSW)"
            info["VariantContents"] = [
                "BasebandFirmware": "Release",
                "DCP": "DarwinProduction",
                "DFU": "DarwinProduction",
                "Firmware": "DarwinProduction",
                "InitiumBaseband": "Production",
                "InstalledKernelCache": "Production",
                "InstalledSPTM": "Production",
                "OS": "Production",
                "RestoreKernelCache": "Production",
                "RestoreRamDisk": "Production",
                "RestoreSEP": "DarwinProduction",
                "RestoreSPTM": "Production",
                "SEP": "DarwinProduction",
                "VinylFirmware": "Release",
            ] as PlistDict
            bi["Info"] = info
        }
        var m = PlistDict()
        // Boot chain (vresearch101 -- matches DFU hardware).
        m["LLB"] = try entry(C, prod, "LLB")
        m["iBSS"] = try entry(C, prod, "iBSS")
        m["iBEC"] = try entry(C, prod, "iBEC")
        m["iBoot"] = try entry(C, res, "iBoot") // research iBoot
        // Security monitors (shared across board configs).
        m["Ap,RestoreSecurePageTableMonitor"] = try entry(C, prod, "Ap,RestoreSecurePageTableMonitor")
        m["Ap,RestoreTrustedExecutionMonitor"] = try entry(C, prod, "Ap,RestoreTrustedExecutionMonitor")
        m["Ap,SecurePageTableMonitor"] = try entry(C, prod, "Ap,SecurePageTableMonitor")
        m["Ap,TrustedExecutionMonitor"] = try entry(C, res, "Ap,TrustedExecutionMonitor")
        // Device tree (vphone600ap -- sets MKB dt=1 for keybag-less boot).
        m["DeviceTree"] = try entry(C, vp, "DeviceTree")
        m["RestoreDeviceTree"] = try entry(C, vp, "RestoreDeviceTree")
        // SEP (vphone600 -- matches device tree).
        m["SEP"] = try entry(C, vp, "SEP")
        m["RestoreSEP"] = try entry(C, vp, "RestoreSEP")
        // Kernel (vphone600, patched by fw_patch).
        m["KernelCache"] = try entry(C, vpr, "KernelCache") // research
        m["RestoreKernelCache"] = try entry(C, vp, "RestoreKernelCache") // release
        // Recovery mode (vphone600ap carries this entry).
        m["RecoveryMode"] = try entry(C, vp, "RecoveryMode")
        // CloudOS erase ramdisk.
        m["RestoreRamDisk"] = try entry(C, prod, "RestoreRamDisk")
        m["RestoreTrustCache"] = try entry(C, prod, "RestoreTrustCache")
        // iPhone OS image.
        m["Ap,SystemVolumeCanonicalMetadata"] = try entry(I, iErase, "Ap,SystemVolumeCanonicalMetadata")
        m["OS"] = try entry(I, iErase, "OS")
        m["StaticTrustCache"] = try entry(I, iErase, "StaticTrustCache")
        m["SystemVolume"] = try entry(I, iErase, "SystemVolume")
        bi["Manifest"] = m
        return bi
    }

    // MARK: - Restore.plist

    /// Build the merged Restore.plist from cloudOS and iPhone sources.
    ///
    /// - Throws: `ManifestError.missingKey` when a required top-level
    ///   Restore.plist key is absent from either source.
    static func buildRestorePlist(
        cloudOSRP: PlistDict,
        iPhoneRP: PlistDict
    ) throws -> PlistDict {
        // DeviceMap: iPhone first entry + cloudOS vphone600ap/vresearch101ap entries.
        guard let iPhoneDeviceMap = iPhoneRP["DeviceMap"] as? [PlistDict],
              !iPhoneDeviceMap.isEmpty
        else {
            throw ManifestError.missingKey("DeviceMap in iPhone Restore.plist")
        }
        guard let cloudDeviceMap = cloudOSRP["DeviceMap"] as? [PlistDict] else {
            throw ManifestError.missingKey("DeviceMap in cloudOS Restore.plist")
        }
        var deviceMap: [PlistDict] = [iPhoneDeviceMap[0]]
        for d in cloudDeviceMap {
            if let bc = d["BoardConfig"] as? String,
               bc == "vphone600ap" || bc == "vresearch101ap"
            {
                deviceMap.append(d)
            }
        }
        // SupportedProductTypeIDs: merge DFU and Recovery from both sources.
        guard let iPhoneTypeIDs = iPhoneRP["SupportedProductTypeIDs"] as? PlistDict,
              let cloudTypeIDs = cloudOSRP["SupportedProductTypeIDs"] as? PlistDict
        else {
            throw ManifestError.missingKey("SupportedProductTypeIDs")
        }
        var mergedTypeIDs = PlistDict()
        for cat in ["DFU", "Recovery"] {
            let iList = iPhoneTypeIDs[cat] as? [Any] ?? []
            let cList = cloudTypeIDs[cat] as? [Any] ?? []
            mergedTypeIDs[cat] = iList + cList
        }
        // SupportedProductTypes: merge from both sources.
        let iPhoneProductTypes = iPhoneRP["SupportedProductTypes"] as? [String] ?? []
        let cloudProductTypes = cloudOSRP["SupportedProductTypes"] as? [String] ?? []
        // SystemRestoreImageFileSystems: deep copy from iPhone.
        guard let sysRestoreFS = iPhoneRP["SystemRestoreImageFileSystems"] else {
            throw ManifestError.missingKey("SystemRestoreImageFileSystems in iPhone Restore.plist")
        }
        return [
            "ProductBuildVersion": cloudOSRP["ProductBuildVersion"] as Any,
            "ProductVersion": cloudOSRP["ProductVersion"] as Any,
            "DeviceMap": deviceMap,
            "SupportedProductTypeIDs": mergedTypeIDs,
            "SupportedProductTypes": iPhoneProductTypes + cloudProductTypes,
            "SystemRestoreImageFileSystems": deepCopyAny(sysRestoreFS),
        ]
    }

    // MARK: - Plist I/O

    /// Load a plist file and return its top-level dictionary.
    ///
    /// - Throws: `fileNotFound` when the path does not exist, `invalidPlist`
    ///   when the contents do not decode to a dictionary.
    static func loadPlist(_ url: URL) throws -> PlistDict {
        let path = url.path
        guard FileManager.default.fileExists(atPath: path) else {
            throw ManifestError.fileNotFound(path)
        }
        let data = try Data(contentsOf: url)
        guard let dict = try PropertyListSerialization.propertyList(
            from: data,
            options: [],
            format: nil
        ) as? PlistDict else {
            throw ManifestError.invalidPlist(path)
        }
        return dict
    }

    /// Write a plist dictionary to a file in XML format (atomic write).
    static func writePlist(_ dict: PlistDict, to url: URL) throws {
        let data = try PropertyListSerialization.data(
            fromPropertyList: dict,
            format: .xml,
            options: 0
        )
        try data.write(to: url, options: .atomic)
    }

    // MARK: - Deep Copy

    /// Deep-copy a plist dictionary (all nested containers are copied).
    static func deepCopyPlistDict(_ dict: PlistDict) -> PlistDict {
        var result = PlistDict()
        for (key, value) in dict {
            result[key] = deepCopyAny(value)
        }
        return result
    }

    /// Deep-copy any plist value, recursing into dicts and arrays.
    ///
    /// Implemented as a single if/else-if expression (Swift 5.9 implicit return).
    static func deepCopyAny(_ value: Any) -> Any {
        if let dict = value as? PlistDict {
            deepCopyPlistDict(dict)
        } else if let array = value as? [Any] {
            array.map { deepCopyAny($0) }
        } else {
            // Scalar types (String, Int, Bool, Data, Date) are value types or immutable.
            value
        }
    }
}

View File

@@ -0,0 +1,334 @@
// FirmwarePipeline.swift Orchestrates full boot-chain firmware patching.
//
// Swift equivalent of scripts/fw_patch.py main().
//
// Pipeline order: AVPBooter -> iBSS -> iBEC -> LLB -> TXM -> Kernel -> DeviceTree
//
// Variant selection (mirrors Makefile targets):
//   .regular -> base patchers only
//   .dev     -> TXMDevPatcher instead of TXMPatcher
//   .jb      -> TXMDevPatcher + IBootJBPatcher (iBSS) + KernelJBPatcher
import Foundation
/// Orchestrates firmware patching for all boot-chain components.
///
/// The pipeline discovers firmware files inside the VM directory (mirroring
/// `find_restore_dir` + `find_file` in the Python source), loads each file,
/// delegates to the appropriate ``Patcher``, and writes the patched data back.
///
/// **IM4P handling:** The Python pipeline loads IM4P containers, extracts
/// payloads, patches them, and repackages. This Swift pipeline is designed to
/// support an identical flow via a pluggable ``FirmwareLoader`` once IM4P
/// support is implemented. Until then, raw-data loading is used directly.
public final class FirmwarePipeline {
// MARK: - Variant
    /// Firmware patch variant selector (mirrors the Makefile targets).
    public enum Variant: String, Sendable {
        case regular
        case dev
        case jb
    }
// MARK: - Firmware Loader (pluggable IM4P support)
/// Abstraction over IM4P vs raw firmware loading.
///
/// When IM4P handling is ready, provide a conforming type that
/// decompresses/extracts the payload on load and repackages on save.
/// The default ``RawFirmwareLoader`` reads/writes plain bytes.
    public protocol FirmwareLoader {
        /// Load firmware from `url`, returning the mutable payload data.
        /// Container formats (e.g. IM4P) extract/decompress the payload here.
        func load(from url: URL) throws -> Data

        /// Save patched `data` back to `url`, repackaging as needed.
        func save(_ data: Data, to url: URL) throws
    }
/// Default loader: reads and writes raw bytes with no container handling.
public struct RawFirmwareLoader: FirmwareLoader {
public init() {}
public func load(from url: URL) throws -> Data {
try Data(contentsOf: url)
}
public func save(_ data: Data, to url: URL) throws {
try data.write(to: url)
}
}
// MARK: - Component Descriptor
/// Describes a single firmware component in the pipeline.
    struct ComponentDescriptor {
        /// Human-readable component name used in logs and errors.
        let name: String
        /// If true, search paths are relative to the Restore directory.
        /// If false, relative to the VM directory root.
        let inRestoreDir: Bool
        /// Glob patterns used to locate the file (tried in order).
        let searchPatterns: [String]
        /// Factory that creates the appropriate ``Patcher`` for the loaded data.
        /// The Bool argument is the pipeline's verbose flag.
        let patcherFactory: (Data, Bool) -> any Patcher
    }
// MARK: - Properties
let vmDirectory: URL
let variant: Variant
let verbose: Bool
let loader: any FirmwareLoader
// MARK: - Init
public init(
vmDirectory: URL,
variant: Variant = .regular,
verbose: Bool = true,
loader: (any FirmwareLoader)? = nil
) {
self.vmDirectory = vmDirectory
self.variant = variant
self.verbose = verbose
self.loader = loader ?? RawFirmwareLoader()
}
// MARK: - Pipeline Execution
/// Run the full patching pipeline.
///
/// Returns combined ``PatchRecord`` arrays from every component, in order.
/// Throws on the first component that fails to patch.
public func patchAll() throws -> [PatchRecord] {
let restoreDir = try findRestoreDirectory()
log("[*] VM directory: \(vmDirectory.path)")
log("[*] Restore directory: \(restoreDir.path)")
let components = buildComponentList()
log("[*] Patching \(components.count) boot-chain components ...")
var allRecords: [PatchRecord] = []
for component in components {
let baseDir = component.inRestoreDir ? restoreDir : vmDirectory
let fileURL = try findFile(in: baseDir, patterns: component.searchPatterns, label: component.name)
log("\n\(String(repeating: "=", count: 60))")
log(" \(component.name): \(fileURL.path)")
log(String(repeating: "=", count: 60))
// Load
let rawData = try loader.load(from: fileURL)
log(" format: \(rawData.count) bytes")
// Patch
let patcher = component.patcherFactory(rawData, verbose)
let records = try patcher.findAll()
guard !records.isEmpty else {
throw PatcherError.patchSiteNotFound("\(component.name): no patches found")
}
let count = try patcher.apply()
log(" [+] \(count) \(component.name) patches applied")
// Save retrieve the mutated buffer data from the patcher.
let patchedData = extractPatchedData(from: patcher, fallback: rawData, records: records)
try loader.save(patchedData, to: fileURL)
log(" [+] saved")
allRecords.append(contentsOf: records)
}
log("\n\(String(repeating: "=", count: 60))")
log(" All \(components.count) components patched successfully! (\(allRecords.count) total patches)")
log(String(repeating: "=", count: 60))
return allRecords
}
// MARK: - Component List Builder
/// Build the ordered component list based on the variant.
func buildComponentList() -> [ComponentDescriptor] {
var components: [ComponentDescriptor] = []
// 1. AVPBooter always present, lives in VM root
components.append(ComponentDescriptor(
name: "AVPBooter",
inRestoreDir: false,
searchPatterns: ["AVPBooter*.bin"],
patcherFactory: { data, verbose in
AVPBooterPatcher(data: data, verbose: verbose)
}
))
// 2. iBSS JB variant adds nonce-skip via IBootJBPatcher
components.append(ComponentDescriptor(
name: "iBSS",
inRestoreDir: true,
searchPatterns: ["Firmware/dfu/iBSS.vresearch101.RELEASE.im4p"],
patcherFactory: { [variant] data, verbose in
if variant == .jb {
return IBootJBPatcher(data: data, mode: .ibss, verbose: verbose)
}
return IBootPatcher(data: data, mode: .ibss, verbose: verbose)
}
))
// 3. iBEC same for all variants
components.append(ComponentDescriptor(
name: "iBEC",
inRestoreDir: true,
searchPatterns: ["Firmware/dfu/iBEC.vresearch101.RELEASE.im4p"],
patcherFactory: { data, verbose in
IBootPatcher(data: data, mode: .ibec, verbose: verbose)
}
))
// 4. LLB same for all variants
components.append(ComponentDescriptor(
name: "LLB",
inRestoreDir: true,
searchPatterns: ["Firmware/all_flash/LLB.vresearch101.RELEASE.im4p"],
patcherFactory: { data, verbose in
IBootPatcher(data: data, mode: .llb, verbose: verbose)
}
))
// 5. TXM dev/jb variants use TXMDevPatcher (adds entitlements, debugger, dev-mode)
components.append(ComponentDescriptor(
name: "TXM",
inRestoreDir: true,
searchPatterns: ["Firmware/txm.iphoneos.research.im4p"],
patcherFactory: { [variant] data, verbose in
if variant == .dev || variant == .jb {
return TXMDevPatcher(data: data, verbose: verbose)
}
return TXMPatcher(data: data, verbose: verbose)
}
))
// 6. Kernel JB variant uses KernelJBPatcher (84 patches)
components.append(ComponentDescriptor(
name: "kernelcache",
inRestoreDir: true,
searchPatterns: ["kernelcache.research.vphone600"],
patcherFactory: { [variant] data, verbose in
if variant == .jb {
return KernelJBPatcher(data: data, verbose: verbose)
}
return KernelPatcher(data: data, verbose: verbose)
}
))
// 7. DeviceTree same for all variants (stub patcher for now)
components.append(ComponentDescriptor(
name: "DeviceTree",
inRestoreDir: true,
searchPatterns: ["Firmware/all_flash/DeviceTree.vphone600ap.im4p"],
patcherFactory: { data, verbose in
DeviceTreePatcherAdapter(data: data, verbose: verbose)
}
))
return components
}
// MARK: - File Discovery
/// Find the `*Restore*` subdirectory inside the VM directory.
/// Mirrors Python `find_restore_dir`.
func findRestoreDirectory() throws -> URL {
let fm = FileManager.default
let contents = try fm.contentsOfDirectory(at: vmDirectory, includingPropertiesForKeys: [.isDirectoryKey])
.filter { (try? $0.resourceValues(forKeys: [.isDirectoryKey]).isDirectory) == true }
.filter { $0.lastPathComponent.contains("Restore") }
.sorted { $0.lastPathComponent < $1.lastPathComponent }
guard let restoreDir = contents.first else {
throw PatcherError.fileNotFound("No *Restore* directory found in \(vmDirectory.path). Run prepare_firmware first.")
}
return restoreDir
}
/// Find a firmware file by trying glob-style patterns under `baseDir`.
/// Mirrors Python `find_file`.
func findFile(in baseDir: URL, patterns: [String], label: String) throws -> URL {
let fm = FileManager.default
for pattern in patterns {
let candidate = baseDir.appendingPathComponent(pattern)
if fm.fileExists(atPath: candidate.path) {
return candidate
}
}
let searched = patterns.map { baseDir.appendingPathComponent($0).path }.joined(separator: "\n ")
throw PatcherError.fileNotFound("\(label) not found. Searched:\n \(searched)")
}
// MARK: - Data Extraction
/// Extract the patched data from a patcher's internal buffer.
///
/// All current patchers own a ``BinaryBuffer`` whose `.data` property
/// holds the mutated bytes after `apply()`. We use protocol-based
/// access where possible and fall back to manual patch application.
func extractPatchedData(from patcher: any Patcher, fallback: Data, records: [PatchRecord]) -> Data {
// Try known patcher types that expose their buffer.
if let avp = patcher as? AVPBooterPatcher { return avp.buffer.data }
if let iboot = patcher as? IBootPatcher { return iboot.buffer.data }
if let txm = patcher as? TXMPatcher { return txm.buffer.data }
if let kp = patcher as? KernelPatcher { return kp.buffer.data }
if let kjb = patcher as? KernelJBPatcher { return kjb.buffer.data }
if let dt = patcher as? DeviceTreePatcherAdapter { return dt.buffer.data }
// Fallback: apply records manually to a copy of the original data.
var data = fallback
for record in records {
let range = record.fileOffset ..< record.fileOffset + record.patchedBytes.count
data.replaceSubrange(range, with: record.patchedBytes)
}
return data
}
// MARK: - Logging
func log(_ message: String) {
if verbose {
print(message)
}
}
}
// MARK: - DeviceTree Patcher Adapter

/// Adapter that wraps DeviceTree patching behind the ``Patcher`` protocol.
///
/// The real ``DeviceTreePatcher`` is currently a stub enum. This adapter
/// provides a conforming type so the pipeline can include DeviceTree in the
/// component list. Replace the body once `DeviceTreePatcher` is implemented.
final class DeviceTreePatcherAdapter: Patcher {
    let component = "devicetree"
    let verbose: Bool
    let buffer: BinaryBuffer

    init(data: Data, verbose: Bool = true) {
        self.verbose = verbose
        buffer = BinaryBuffer(data)
    }

    /// Stub: DeviceTree patching is not yet migrated to Swift, so no patch
    /// sites are reported. The pipeline will throw `patchSiteNotFound`
    /// unless the caller skips validation for stubs.
    func findAll() throws -> [PatchRecord] {
        []
    }

    /// Write every discovered patch into the buffer and report how many
    /// were applied (currently always zero — see ``findAll()``).
    @discardableResult
    func apply() throws -> Int {
        var applied = 0
        for record in try findAll() {
            buffer.writeBytes(at: record.fileOffset, bytes: record.patchedBytes)
            applied += 1
        }
        return applied
    }
}

View File

@@ -0,0 +1,522 @@
// TXMDevPatcher.swift Dev-variant TXM patcher (entitlements, debugger, developer mode).
//
// Python source: scripts/patchers/txm_dev.py
import Foundation
/// Dev-variant patcher for TXM images.
///
/// Adds 5 patch methods (11 patch records) beyond the base trustcache bypass:
/// 1. selector24 force PASS (mov w0, #0xa1 + b epilogue)
/// 2. get-task-allow entitlement BL → mov x0, #1
/// 3. selector42|29 shellcode hook + manifest flag force
/// 4. debugger entitlement BL → mov w0, #1
/// 5. developer-mode guard → nop
///
/// The base `patchTrustcacheBypass()` throws when its unique marker is
/// missing; all dev-only patches log and skip on failure instead, so a
/// partially-matched image still yields the base patch set.
public final class TXMDevPatcher: TXMPatcher {
    override public func findAll() throws -> [PatchRecord] {
        patches = []
        try patchTrustcacheBypass() // base patch
        patchSelector24ForcePass()
        patchGetTaskAllowForceTrue()
        patchSelector42_29Shellcode()
        patchDebuggerEntitlementForceTrue()
        patchDeveloperModeBypass()
        return patches
    }

    // MARK: - Flat-binary ADRP+ADD string reference search

    /// Find all ADRP+ADD pairs in the flat binary that resolve to `targetOff`.
    ///
    /// TXM is a raw flat binary (no Mach-O), so we cannot use a pre-built ADRP index.
    /// This mirrors the Python `_find_refs_to_offset` full linear scan.
    /// Page math treats the file offset as the PC, which only holds for a
    /// flat binary whose load address is page-congruent with offset 0.
    ///
    /// Returns an array of `(adrpOff, addOff)` pairs.
    private func findRefsToOffset(_ targetOff: Int) -> [(adrpOff: Int, addOff: Int)] {
        let size = buffer.count
        var refs: [(Int, Int)] = []
        var off = 0
        while off + 8 <= size {
            let rawA = buffer.readU32(at: off)
            let rawB = buffer.readU32(at: off + 4)
            // ADRP: bits[31]=1, bits[28:24]=10000
            guard rawA & 0x9F00_0000 == 0x9000_0000 else { off += 4; continue }
            // ADD immediate (64-bit): bits[31:24] = 0x91
            guard rawB & 0xFF80_0000 == 0x9100_0000 else { off += 4; continue }
            // Decode ADRP target page
            let adrpRd = rawA & 0x1F
            let immlo = (rawA >> 29) & 0x3
            let immhi = (rawA >> 5) & 0x7FFFF
            let imm21 = (immhi << 2) | immlo
            // Sign-extend imm21 from 21 bits (shift into the top of Int32, arithmetic shift back)
            let signedImm21 = Int64(Int32(bitPattern: imm21 << 11) >> 11)
            let pcPage = UInt64(off) & ~UInt64(0xFFF)
            let adrpPage = UInt64(bitPattern: Int64(pcPage) + (signedImm21 << 12))
            // Decode ADD: Rn must equal ADRP's Rd; imm12 is the page offset
            let addRn = (rawB >> 5) & 0x1F
            let addImm12 = (rawB >> 10) & 0xFFF
            guard adrpRd == addRn else { off += 4; continue }
            let resolved = Int(adrpPage) + Int(addImm12)
            if resolved == targetOff {
                refs.append((off, off + 4))
            }
            off += 4
        }
        return refs
    }

    /// Find all ADRP+ADD instruction pairs referencing any occurrence of `needle` in the binary.
    ///
    /// Returns `(stringOff, adrpOff, addOff)` tuples — mirrors Python `_find_string_refs`.
    /// The `seen` set de-duplicates ADRP sites when the needle occurs more than once.
    private func findStringRefs(_ needle: Data) -> [(stringOff: Int, adrpOff: Int, addOff: Int)] {
        var results: [(Int, Int, Int)] = []
        var seen = Set<Int>()
        var search = 0
        while let range = buffer.data.range(of: needle, in: search ..< buffer.count) {
            let sOff = range.lowerBound
            // Advance by one so overlapping occurrences are also found.
            search = sOff + 1
            for (adrpOff, addOff) in findRefsToOffset(sOff) {
                if !seen.contains(adrpOff) {
                    seen.insert(adrpOff)
                    results.append((sOff, adrpOff, addOff))
                }
            }
        }
        return results
    }

    /// Find string refs using a UTF-8 string literal.
    private func findStringRefs(_ needle: String) -> [(stringOff: Int, adrpOff: Int, addOff: Int)] {
        guard let data = needle.data(using: .utf8) else { return [] }
        return findStringRefs(data)
    }

    // MARK: - Helpers

    /// Scan backward from `off` for PACIBSP — mirrors Python `_find_func_start`.
    /// Searches at most `back` bytes; returns nil if no prologue marker is found.
    private func findFuncStart(_ off: Int, back: Int = 0x1000) -> Int? {
        let start = max(0, off - back)
        var scan = off & ~3 // word-align the starting point
        while scan >= start {
            if buffer.readU32(at: scan) == ARM64.pacibspU32 {
                return scan
            }
            scan -= 4
        }
        return nil
    }

    /// Find a zero-filled cave of at least `minInsns * 4` bytes — mirrors Python `_find_udf_cave`.
    ///
    /// The Python logic:
    /// - Scan forward for a run of zero words.
    /// - Prefer a run immediately after a branch instruction (with 8-byte safety pad).
    /// - Otherwise, return the nearest run to `nearOff`.
    ///
    /// NOTE(review): despite the name, this scans for zero words, not UDF
    /// encodings — presumably the caves are zero-filled in this image; confirm
    /// against the Python source.
    private func findUdfCave(minInsns: Int, nearOff: Int? = nil, maxDistance: Int = 0x80000) -> Int? {
        let need = minInsns * 4
        let size = buffer.count
        // Window around nearOff when provided, otherwise the whole binary.
        let searchStart = nearOff.map { max(0, $0 - 0x1000) } ?? 0
        let searchEnd = nearOff.map { min(size, $0 + maxDistance) } ?? size
        var best: Int? = nil
        var bestDist = Int.max
        var off = searchStart
        let branchMnemonics: Set = ["b", "b.eq", "b.ne", "b.lo", "b.hs", "cbz", "cbnz", "tbz", "tbnz"]
        while off < searchEnd {
            // Count consecutive zero words starting at off
            var run = off
            while run < searchEnd && buffer.readU32(at: run) == 0 {
                run += 4
            }
            if run - off >= need {
                // Check instruction before the run
                if off >= 4, let prev = disasm.disassembleOne(in: buffer.data, at: off - 4) {
                    if branchMnemonics.contains(prev.mnemonic) {
                        // 2-word safety gap after the preceding branch
                        let padded = off + 8
                        return (padded + need <= run) ? padded : off
                    }
                }
                // Not after a branch — track nearest to nearOff
                if let near = nearOff {
                    let dist = abs(off - near)
                    if dist < bestDist {
                        best = off
                        bestDist = dist
                    }
                }
            }
            // Skip past the examined run (plus one word) to avoid rescanning it.
            off = (run > off) ? run + 4 : off + 4
        }
        return best
    }

    /// Find the function start of the debugger-gate function containing the
    /// `com.apple.private.cs.debugger` BL site — mirrors Python `_find_debugger_gate_func_start`.
    ///
    /// Pattern (at BL site):
    ///   [scan-8] mov x0, #0
    ///   [scan-4] mov x2, #0
    ///   [scan+0] bl <entitlement_check>
    ///   [scan+4] tbnz w0, #0, <...>
    ///
    /// Returns nil unless exactly one distinct function start matches — an
    /// ambiguous match means the pattern is no longer unique in this image.
    private func findDebuggerGateFuncStart() -> Int? {
        let refs = findStringRefs(Data("com.apple.private.cs.debugger".utf8))
        var starts = Set<Int>()
        for (_, _, addOff) in refs {
            let scanEnd = min(addOff + 0x20, buffer.count - 8)
            var scan = addOff
            while scan < scanEnd {
                guard
                    let i = disasm.disassembleOne(in: buffer.data, at: scan),
                    let n = disasm.disassembleOne(in: buffer.data, at: scan + 4),
                    scan >= 8,
                    let p1 = disasm.disassembleOne(in: buffer.data, at: scan - 4),
                    let p2 = disasm.disassembleOne(in: buffer.data, at: scan - 8)
                else { scan += 4; continue }
                let tbnzOk = n.mnemonic == "tbnz" && n.operandString.hasPrefix("w0, #0,")
                let p1ok = p1.mnemonic == "mov" && p1.operandString == "x2, #0"
                let p2ok = p2.mnemonic == "mov" && p2.operandString == "x0, #0"
                if i.mnemonic == "bl", tbnzOk, p1ok, p2ok {
                    if let fs = findFuncStart(scan) {
                        starts.insert(fs)
                    }
                }
                scan += 4
            }
        }
        guard starts.count == 1 else { return nil }
        return starts.first
    }

    // MARK: - Dev Patches

    /// Patch selector24 handler to return 0xA1 (PASS) immediately.
    ///
    /// Inserts `mov w0, #0xa1 ; b <epilogue>` right after the prologue,
    /// skipping validation while preserving the stack frame.
    /// Logs and returns without emitting on any failed sub-step.
    func patchSelector24ForcePass() {
        let size = buffer.count
        // Scan for any `mov w0, #0xa1` in the binary
        var off = 0
        while off + 4 <= size {
            guard let ins = disasm.disassembleOne(in: buffer.data, at: off),
                  ins.mnemonic == "mov", ins.operandString == "w0, #0xa1"
            else { off += 4; continue }
            guard let funcStart = findFuncStart(off) else { off += 4; continue }
            // Verify this is selector24 by searching for the characteristic
            // LDR X1,[Xn,#0x38] / ADD X2,... / BL / LDP pattern in [funcStart, off)
            var patternFound = false
            var scan = funcStart
            while scan + 12 < off {
                guard
                    let i0 = disasm.disassembleOne(in: buffer.data, at: scan),
                    let i1 = disasm.disassembleOne(in: buffer.data, at: scan + 4),
                    let i2 = disasm.disassembleOne(in: buffer.data, at: scan + 8),
                    let i3 = disasm.disassembleOne(in: buffer.data, at: scan + 12)
                else { scan += 4; continue }
                let ldrOk = i0.mnemonic == "ldr"
                    && i0.operandString.contains("x1,")
                    && i0.operandString.contains("#0x38]")
                let addOk = i1.mnemonic == "add" && i1.operandString.hasPrefix("x2,")
                let blOk = i2.mnemonic == "bl"
                let ldpOk = i3.mnemonic == "ldp"
                if ldrOk, addOk, blOk, ldpOk {
                    patternFound = true
                    break
                }
                scan += 4
            }
            guard patternFound else { off += 4; continue }
            // Find prologue end: scan for `add x29, sp, #imm`
            var bodyStart: Int? = nil
            var p = funcStart + 4
            while p < funcStart + 0x30 {
                if let pi = disasm.disassembleOne(in: buffer.data, at: p),
                   pi.mnemonic == "add", pi.operandString.hasPrefix("x29, sp,")
                {
                    bodyStart = p + 4
                    break
                }
                p += 4
            }
            guard let body = bodyStart else {
                log(" [-] TXM: selector24 prologue end not found")
                return
            }
            // Find epilogue: scan forward from `off` for retab/ret,
            // then walk back for `ldp x29, x30, ...`
            var epilogue: Int? = nil
            var r = off
            while r < min(off + 0x200, size) {
                if let ri = disasm.disassembleOne(in: buffer.data, at: r),
                   ri.mnemonic == "retab" || ri.mnemonic == "ret"
                {
                    var e = r - 4
                    while e > max(r - 0x20, funcStart) {
                        if let ei = disasm.disassembleOne(in: buffer.data, at: e),
                           ei.mnemonic == "ldp", ei.operandString.contains("x29, x30")
                        {
                            epilogue = e
                            break
                        }
                        e -= 4
                    }
                    break
                }
                r += 4
            }
            guard let epilogueOff = epilogue else {
                log(" [-] TXM: selector24 epilogue not found")
                return
            }
            emit(body, ARM64.movW0_0xA1,
                 patchID: "txm_dev.selector24_bypass_mov",
                 description: "selector24 bypass: mov w0, #0xa1 (PASS)")
            guard let bInsn = ARM64Encoder.encodeB(from: body + 4, to: epilogueOff) else {
                log(" [-] TXM: selector24 branch encoding failed")
                return
            }
            emit(body + 4, bInsn,
                 patchID: "txm_dev.selector24_bypass_b",
                 description: "selector24 bypass: b epilogue")
            return
        }
        log(" [-] TXM: selector24 handler not found")
    }

    /// Force get-task-allow entitlement check to return true (BL → mov x0, #1).
    /// Requires exactly one `bl` + `tbnz w0, #0` site near a string ref.
    func patchGetTaskAllowForceTrue() {
        let refs = findStringRefs(Data("get-task-allow".utf8))
        guard !refs.isEmpty else {
            log(" [-] TXM: get-task-allow string refs not found")
            return
        }
        var cands: [Int] = []
        for (_, _, addOff) in refs {
            let scanEnd = min(addOff + 0x20, buffer.count - 4)
            var scan = addOff
            while scan < scanEnd {
                guard
                    let i = disasm.disassembleOne(in: buffer.data, at: scan),
                    let n = disasm.disassembleOne(in: buffer.data, at: scan + 4)
                else { scan += 4; continue }
                if i.mnemonic == "bl",
                   n.mnemonic == "tbnz",
                   n.operandString.hasPrefix("w0, #0,")
                {
                    cands.append(scan)
                }
                scan += 4
            }
        }
        guard cands.count == 1 else {
            log(" [-] TXM: expected 1 get-task-allow BL site, found \(cands.count)")
            return
        }
        emit(cands[0], ARM64.movX0_1,
             patchID: "txm_dev.get_task_allow",
             description: "get-task-allow: bl -> mov x0,#1")
    }

    /// Selector 42|29 patch via dynamic cave shellcode + branch redirect.
    ///
    /// Shellcode (6 instructions in a zero-filled cave):
    ///   nop                       (safety padding)
    ///   mov x0, #1
    ///   strb w0, [x20, #0x30]
    ///   mov x0, x20
    ///   b <stub_off + 4>          (return to original flow)
    func patchSelector42_29Shellcode() {
        guard let fn = findDebuggerGateFuncStart() else {
            log(" [-] TXM: debugger-gate function not found (selector42|29)")
            return
        }
        // Find the stub: bti j; mov x0,x20; bl <fn>; mov x1,x21; mov x2,x22; bl <fn>; b ...
        var stubs: [Int] = []
        let size = buffer.count
        var off = 4
        while off + 24 <= size {
            guard
                let p = disasm.disassembleOne(in: buffer.data, at: off - 4),
                let i0 = disasm.disassembleOne(in: buffer.data, at: off),
                let i1 = disasm.disassembleOne(in: buffer.data, at: off + 4),
                let i2 = disasm.disassembleOne(in: buffer.data, at: off + 8),
                let i3 = disasm.disassembleOne(in: buffer.data, at: off + 12),
                let i4 = disasm.disassembleOne(in: buffer.data, at: off + 16),
                let i5 = disasm.disassembleOne(in: buffer.data, at: off + 20)
            else { off += 4; continue }
            guard p.mnemonic == "bti", p.operandString == "j" else { off += 4; continue }
            guard i0.mnemonic == "mov", i0.operandString == "x0, x20" else { off += 4; continue }
            guard i1.mnemonic == "bl" else { off += 4; continue }
            guard i2.mnemonic == "mov", i2.operandString == "x1, x21" else { off += 4; continue }
            guard i3.mnemonic == "mov", i3.operandString == "x2, x22" else { off += 4; continue }
            guard i4.mnemonic == "bl" else { off += 4; continue }
            guard i5.mnemonic == "b" else { off += 4; continue }
            // i4's branch target must point to fn (the debugger-gate function)
            let i4u32 = buffer.readU32(at: off + 16)
            if let tgt = ARM64Encoder.decodeBranchTarget(insn: i4u32, pc: UInt64(off + 16)),
               Int(tgt) == fn
            {
                stubs.append(off)
            }
            off += 4
        }
        guard stubs.count == 1 else {
            log(" [-] TXM: selector42|29 stub expected 1, found \(stubs.count)")
            return
        }
        let stubOff = stubs[0]
        guard let cave = findUdfCave(minInsns: 6, nearOff: stubOff) else {
            log(" [-] TXM: no UDF cave found for selector42|29 shellcode")
            return
        }
        // Redirect stub entry to shellcode cave
        guard let branchToShellcode = ARM64Encoder.encodeB(from: stubOff, to: cave) else {
            log(" [-] TXM: selector42|29 branch-to-cave encoding failed")
            return
        }
        emit(stubOff, branchToShellcode,
             patchID: "txm_dev.sel42_29_branch",
             description: "selector42|29: branch to shellcode")
        // Shellcode body at cave
        emit(cave, ARM64.nop, patchID: "txm_dev.sel42_29_shell_nop", description: "selector42|29 shellcode pad: udf -> nop")
        emit(cave + 4, ARM64.movX0_1, patchID: "txm_dev.sel42_29_shell_mov1", description: "selector42|29 shellcode: mov x0,#1")
        emit(cave + 8, ARM64.strbW0X20_30, patchID: "txm_dev.sel42_29_shell_strb", description: "selector42|29 shellcode: strb w0,[x20,#0x30]")
        emit(cave + 12, ARM64.movX0X20, patchID: "txm_dev.sel42_29_shell_mov20", description: "selector42|29 shellcode: mov x0,x20")
        // Branch back to stub_off + 4 (skip the redirected first instruction)
        guard let branchBack = ARM64Encoder.encodeB(from: cave + 16, to: stubOff + 4) else {
            log(" [-] TXM: selector42|29 branch-back encoding failed")
            return
        }
        emit(cave + 16, branchBack,
             patchID: "txm_dev.sel42_29_shell_ret",
             description: "selector42|29 shellcode: branch back")
    }

    /// Force debugger entitlement check to return true (BL → mov w0, #1).
    ///
    /// Pattern (at BL site):
    ///   [scan-8] mov x0, #0
    ///   [scan-4] mov x2, #0
    ///   [scan+0] bl <entitlement_check>
    ///   [scan+4] tbnz w0, #0, <...>
    func patchDebuggerEntitlementForceTrue() {
        let refs = findStringRefs(Data("com.apple.private.cs.debugger".utf8))
        guard !refs.isEmpty else {
            log(" [-] TXM: debugger refs not found")
            return
        }
        var cands: [Int] = []
        for (_, _, addOff) in refs {
            let scanEnd = min(addOff + 0x20, buffer.count - 4)
            var scan = addOff
            while scan < scanEnd {
                guard
                    scan >= 8,
                    let i = disasm.disassembleOne(in: buffer.data, at: scan),
                    let n = disasm.disassembleOne(in: buffer.data, at: scan + 4),
                    let p1 = disasm.disassembleOne(in: buffer.data, at: scan - 4),
                    let p2 = disasm.disassembleOne(in: buffer.data, at: scan - 8)
                else { scan += 4; continue }
                if i.mnemonic == "bl",
                   n.mnemonic == "tbnz",
                   n.operandString.hasPrefix("w0, #0,"),
                   p1.mnemonic == "mov", p1.operandString == "x2, #0",
                   p2.mnemonic == "mov", p2.operandString == "x0, #0"
                {
                    cands.append(scan)
                }
                scan += 4
            }
        }
        guard cands.count == 1 else {
            log(" [-] TXM: expected 1 debugger BL site, found \(cands.count)")
            return
        }
        emit(cands[0], ARM64.movW0_1,
             patchID: "txm_dev.debugger_entitlement",
             description: "debugger entitlement: bl -> mov w0,#1")
    }

    /// Developer-mode bypass: NOP conditional guard before deny log path.
    ///
    /// Finds `tbz/tbnz/cbz/cbnz w9, #0, <...>` just before the
    /// "developer mode enabled due to system policy configuration" string ref,
    /// then NOPs it.
    func patchDeveloperModeBypass() {
        let needle = "developer mode enabled due to system policy configuration"
        let refs = findStringRefs(Data(needle.utf8))
        guard !refs.isEmpty else {
            log(" [-] TXM: developer-mode string ref not found")
            return
        }
        let guardMnemonics: Set = ["tbz", "tbnz", "cbz", "cbnz"]
        var cands: [Int] = []
        for (_, _, addOff) in refs {
            // Walk backward up to 0x20 bytes before the ADD of the string ref.
            var back = addOff - 4
            while back > max(addOff - 0x20, 0) {
                guard let ins = disasm.disassembleOne(in: buffer.data, at: back) else { back -= 4; continue }
                if guardMnemonics.contains(ins.mnemonic),
                   ins.operandString.hasPrefix("w9, #0,")
                {
                    cands.append(back)
                }
                back -= 4
            }
        }
        guard cands.count == 1 else {
            log(" [-] TXM: expected 1 developer mode guard, found \(cands.count)")
            return
        }
        emit(cands[0], ARM64.nop,
             patchID: "txm_dev.developer_mode_bypass",
             description: "developer mode bypass")
    }
}

View File

@@ -0,0 +1,184 @@
// TXMPatcher.swift TXM (Trusted Execution Monitor) patcher.
//
// Implements the trustcache bypass patch.
// Python source: scripts/patchers/txm.py
import Foundation
/// Base patcher for TXM (Trusted Execution Monitor) images.
///
/// Emits a single patch:
/// 1. Trustcache binary-search BL → mov x0, #0
///    (inside the AMFI cert verification function, identified by the
///    unique constant 0x2446 loaded into w19)
public class TXMPatcher: Patcher {
    public let component = "txm"
    public let verbose: Bool
    let buffer: BinaryBuffer
    let disasm = ARM64Disassembler()
    var patches: [PatchRecord] = []

    public init(data: Data, verbose: Bool = true) {
        self.verbose = verbose
        buffer = BinaryBuffer(data)
    }

    /// Rescan the image from scratch and return every patch record found.
    public func findAll() throws -> [PatchRecord] {
        patches.removeAll()
        try patchTrustcacheBypass()
        return patches
    }

    /// Locate all patch sites, write them into the buffer, and return the count.
    @discardableResult
    public func apply() throws -> Int {
        _ = try findAll()
        patches.forEach { buffer.writeBytes(at: $0.fileOffset, bytes: $0.patchedBytes) }
        if verbose, !patches.isEmpty {
            print("\n [\(patches.count) TXM patches applied]")
        }
        return patches.count
    }

    /// Current buffer contents (reflects applied patches after `apply()`).
    public var patchedData: Data {
        buffer.data
    }

    // MARK: - Emit

    /// Record a single patch (with before/after disassembly) and log it in verbose mode.
    func emit(_ offset: Int, _ patchBytes: Data, patchID: String, description: String) {
        let priorBytes = buffer.readBytes(at: offset, count: patchBytes.count)
        let pre = disasm.disassembleOne(in: buffer.original, at: offset)
        let post = disasm.disassembleOne(patchBytes, at: UInt64(offset))
        let preText = pre.map { "\($0.mnemonic) \($0.operandString)" } ?? "???"
        let postText = post.map { "\($0.mnemonic) \($0.operandString)" } ?? "???"
        patches.append(PatchRecord(
            patchID: patchID,
            component: component,
            fileOffset: offset,
            virtualAddress: nil,
            originalBytes: priorBytes,
            patchedBytes: patchBytes,
            beforeDisasm: preText,
            afterDisasm: postText,
            description: description
        ))
        if verbose {
            print(" 0x\(String(format: "%06X", offset)): \(preText)\(postText) [\(description)]")
        }
    }
}
// MARK: - Trustcache Bypass

extension TXMPatcher {
    //
    // Trustcache bypass
    //
    // The AMFI cert verification function has a unique constant:
    //   mov w19, #0x2446   (encoded as 0x528488D3)
    //
    // Within that function, a binary search calls a hash-compare
    // function with SHA-1 size:
    //   mov w2, #0x14; bl <hash_cmp>; cbz w0, <match>
    // followed by:
    //   tbnz w0, #0x1f, <lower_half>   (sign bit = search direction)
    //
    // Patch: bl <hash_cmp> → mov x0, #0
    // This makes cbz always branch to <match>, bypassing the
    // trustcache lookup entirely.
    //

    /// Locate the trustcache binary-search BL and record the bypass patch.
    /// Throws ``PatcherError/patchSiteNotFound`` if any step fails; this is
    /// the one patch the dev-variant subclass also requires.
    func patchTrustcacheBypass() throws {
        // Step 1: Find the unique function marker (mov w19, #0x2446)
        // Encoding: MOVZ W19, #0x2446 = 0x528488D3
        // Exactly one occurrence is required — more or fewer means the image
        // layout changed and the heuristic is no longer trustworthy.
        let markerBytes = ARM64.encodeU32(0x5284_88D3)
        let markerLocs = buffer.findAll(markerBytes)
        guard markerLocs.count == 1 else {
            if verbose {
                print(" [-] TXM: expected 1 'mov w19, #0x2446', found \(markerLocs.count)")
            }
            throw PatcherError.patchSiteNotFound(
                "expected exactly 1 'mov w19, #0x2446' marker, found \(markerLocs.count)"
            )
        }
        let markerOff = markerLocs[0]
        // Step 2: Find the containing function — scan back for PACIBSP
        let funcStart = findFunctionStart(from: markerOff)
        guard let funcStart else {
            if verbose {
                print(" [-] TXM: function start not found")
            }
            throw PatcherError.patchSiteNotFound("PACIBSP not found before marker at 0x\(String(format: "%X", markerOff))")
        }
        // Step 3: Within the function, find: mov w2, #0x14; bl; cbz w0; tbnz w0, #0x1f
        // 0x2000 is an upper bound on the function size, clamped to the buffer.
        let funcEnd = min(funcStart + 0x2000, buffer.count)
        let funcLen = funcEnd - funcStart
        guard funcLen > 0 else {
            throw PatcherError.patchSiteNotFound("function range is empty")
        }
        let funcData = buffer.readBytes(at: funcStart, count: funcLen)
        let insns = disasm.disassemble(funcData, at: UInt64(funcStart))
        for i in 0 ..< insns.count {
            let ins = insns[i]
            // Anchor: SHA-1 digest length (0x14) loaded as the compare size.
            guard ins.mnemonic == "mov", ins.operandString == "w2, #0x14" else { continue }
            guard i + 3 < insns.count else { continue }
            let blIns = insns[i + 1]
            let cbzIns = insns[i + 2]
            let tbnzIns = insns[i + 3]
            guard blIns.mnemonic == "bl" else { continue }
            guard cbzIns.mnemonic == "cbz", cbzIns.operandString.hasPrefix("w0") else { continue }
            // Sign-bit test (#0x1f) selects the binary-search direction.
            guard tbnzIns.mnemonic == "tbnz" || tbnzIns.mnemonic == "tbz",
                  tbnzIns.operandString.contains("#0x1f") else { continue }
            // Found the pattern — patch the BL to mov x0, #0
            let blOffset = Int(blIns.address)
            emit(
                blOffset,
                ARM64.movX0_0,
                patchID: "txm.trustcache_bypass",
                description: "trustcache bypass: bl → mov x0, #0"
            )
            return
        }
        if verbose {
            print(" [-] TXM: binary search pattern not found in function")
        }
        throw PatcherError.patchSiteNotFound("mov w2, #0x14; bl; cbz w0; tbnz w0, #0x1f pattern not found")
    }

    // MARK: - Helpers

    /// Scan backward from `offset` (aligned to 4 bytes) for a PACIBSP instruction.
    /// Searches up to 0x200 bytes back, matching the Python implementation.
    /// Returns the offset of PACIBSP (treated as the function entry) or nil.
    private func findFunctionStart(from offset: Int) -> Int? {
        let pacibspU32 = ARM64.pacibspU32
        var scan = offset & ~3
        let limit = max(0, offset - 0x200)
        while scan >= limit {
            let insn = buffer.readU32(at: scan)
            if insn == pacibspU32 {
                return scan
            }
            // Guard against wrapping below zero when the marker sits near offset 0.
            if scan == 0 { break }
            scan -= 4
        }
        return nil
    }
}

View File

@@ -7,7 +7,7 @@ struct VPhoneAppBrowserView: View {
VStack(spacing: 0) {
filterBar
Divider()
if model.isLoading && model.apps.isEmpty {
if model.isLoading, model.apps.isEmpty {
ProgressView()
.frame(maxWidth: .infinity, maxHeight: .infinity)
} else if model.filteredApps.isEmpty {

View File

@@ -0,0 +1,174 @@
// FirmwarePatcherTests.swift Tests for ARM64 constants, encoders, and round-trip verification.
@testable import FirmwarePatcher
import Foundation
import Testing
/// Verifies that every pre-encoded ARM64 instruction constant disassembles
/// to the expected mnemonic.
struct ARM64ConstantTests {
    let disasm = ARM64Disassembler()

    /// Disassemble `data` and assert its mnemonic equals `expectedMnemonic`.
    ///
    /// Failures are attributed to the call site via `sourceLocation`.
    /// (The previous `file _: String = #file, line _: Int = #line` parameters
    /// were accepted but discarded, so every failure pointed at this helper
    /// instead of the failing test.)
    func verifyConstant(
        _ data: Data,
        expectedMnemonic: String,
        sourceLocation: SourceLocation = #_sourceLocation
    ) {
        let insn = disasm.disassembleOne(data, at: 0)
        #expect(insn != nil, "Failed to disassemble constant", sourceLocation: sourceLocation)
        #expect(
            insn?.mnemonic == expectedMnemonic,
            "Expected \(expectedMnemonic), got \(insn?.mnemonic ?? "nil")",
            sourceLocation: sourceLocation
        )
    }

    @Test func nop() {
        verifyConstant(ARM64.nop, expectedMnemonic: "nop")
    }

    @Test func ret() {
        verifyConstant(ARM64.ret, expectedMnemonic: "ret")
    }

    @Test func retaa() {
        verifyConstant(ARM64.retaa, expectedMnemonic: "retaa")
    }

    @Test func retab() {
        verifyConstant(ARM64.retab, expectedMnemonic: "retab")
    }

    @Test func pacibsp() {
        // PACIBSP is encoded as HINT #27; capstone may show it as "pacibsp" or "hint"
        let insn = disasm.disassembleOne(ARM64.pacibsp, at: 0)
        #expect(insn != nil)
        #expect(insn?.mnemonic == "pacibsp" || insn?.mnemonic == "hint")
    }

    @Test func movX0_0() {
        // MOV Xd, #imm is a MOVZ alias; accept either spelling.
        let insn = disasm.disassembleOne(ARM64.movX0_0, at: 0)
        #expect(insn != nil)
        #expect(insn?.mnemonic == "mov" || insn?.mnemonic == "movz")
    }

    @Test func movX0_1() {
        let insn = disasm.disassembleOne(ARM64.movX0_1, at: 0)
        #expect(insn != nil)
        #expect(insn?.mnemonic == "mov" || insn?.mnemonic == "movz")
    }

    @Test func movW0_0() {
        let insn = disasm.disassembleOne(ARM64.movW0_0, at: 0)
        #expect(insn != nil)
        #expect(insn?.mnemonic == "mov" || insn?.mnemonic == "movz")
    }

    @Test func movW0_1() {
        let insn = disasm.disassembleOne(ARM64.movW0_1, at: 0)
        #expect(insn != nil)
        #expect(insn?.mnemonic == "mov" || insn?.mnemonic == "movz")
    }

    @Test func cmpW0W0() {
        verifyConstant(ARM64.cmpW0W0, expectedMnemonic: "cmp")
    }

    @Test func cmpX0X0() {
        verifyConstant(ARM64.cmpX0X0, expectedMnemonic: "cmp")
    }

    @Test func movX0X20() {
        // MOV Xd, Xm is an ORR alias; accept either spelling.
        let insn = disasm.disassembleOne(ARM64.movX0X20, at: 0)
        #expect(insn != nil)
        #expect(insn?.mnemonic == "mov" || insn?.mnemonic == "orr")
    }

    @Test func strbW0X20_30() {
        verifyConstant(ARM64.strbW0X20_30, expectedMnemonic: "strb")
    }

    @Test func movW0_0xA1() {
        let insn = disasm.disassembleOne(ARM64.movW0_0xA1, at: 0)
        #expect(insn != nil)
        #expect(insn?.mnemonic == "mov" || insn?.mnemonic == "movz")
    }
}
/// Round-trip and range tests for the ``ARM64Encoder`` branch/ADRP/ADD helpers.
struct ARM64EncoderTests {
    let disasm = ARM64Disassembler()

    @Test func encodeBForward() throws {
        // Forward branch spanning 0x1000 bytes.
        let encoded = ARM64Encoder.encodeB(from: 0x1000, to: 0x2000)
        #expect(encoded != nil)
        let insn = try disasm.disassembleOne(#require(encoded), at: 0x1000)
        #expect(insn?.mnemonic == "b")
    }

    @Test func encodeBBackward() throws {
        // Backward branch spanning 0x1000 bytes.
        let encoded = ARM64Encoder.encodeB(from: 0x2000, to: 0x1000)
        #expect(encoded != nil)
        let insn = try disasm.disassembleOne(#require(encoded), at: 0x2000)
        #expect(insn?.mnemonic == "b")
    }

    @Test func encodeBLForward() throws {
        let encoded = ARM64Encoder.encodeBL(from: 0x1000, to: 0x2000)
        #expect(encoded != nil)
        let insn = try disasm.disassembleOne(#require(encoded), at: 0x1000)
        #expect(insn?.mnemonic == "bl")
    }

    @Test func decodeBranchTarget() throws {
        // Round-trip: encode a B, decode its target, and compare.
        let origin: UInt64 = 0x10000
        let destination: UInt64 = 0x20000
        let encoded = try #require(ARM64Encoder.encodeB(from: Int(origin), to: Int(destination)))
        let word = encoded.withUnsafeBytes { $0.load(as: UInt32.self) }
        #expect(ARM64Encoder.decodeBranchTarget(insn: word, pc: origin) == destination)
    }

    @Test func encodeBOutOfRange() {
        // B immediates reach ±128 MB; a 256 MB displacement must fail.
        #expect(ARM64Encoder.encodeB(from: 0, to: 0x1000_0000) == nil)
    }

    @Test func encodeADRP() throws {
        let encoded = ARM64Encoder.encodeADRP(rd: 0, pc: 0x1000, target: 0x2000)
        #expect(encoded != nil)
        let insn = try disasm.disassembleOne(#require(encoded), at: 0x1000)
        #expect(insn?.mnemonic == "adrp")
    }

    @Test func encodeAddImm12() throws {
        let encoded = ARM64Encoder.encodeAddImm12(rd: 0, rn: 0, imm12: 0x100)
        #expect(encoded != nil)
        let insn = try disasm.disassembleOne(#require(encoded), at: 0)
        #expect(insn?.mnemonic == "add")
    }
}
// Exercises BinaryBuffer primitives: 32-bit read/write, string search, pattern search.
struct BinaryBufferTests {
    @Test func readWriteU32() {
        // Write a 32-bit value at offset 4 and read it straight back.
        let buf = BinaryBuffer(Data(repeating: 0, count: 16))
        buf.writeU32(at: 4, value: 0xDEAD_BEEF)
        #expect(buf.readU32(at: 4) == 0xDEAD_BEEF)
    }

    @Test func findString() {
        // The needle sits at the very start of the buffer (NUL-terminated, with trailing data).
        let buf = BinaryBuffer(Data("Hello, World!\0Extra".utf8))
        #expect(buf.findString("Hello, World!") == 0)
    }

    @Test func findAll() {
        // Place a NOP at offsets 8 and 20; findAll should report exactly those two hits.
        let nop = ARM64.nop
        var bytes = Data(repeating: 0, count: 32)
        bytes.replaceSubrange(8 ..< 12, with: nop)
        bytes.replaceSubrange(20 ..< 24, with: nop)
        let hits = BinaryBuffer(bytes).findAll(nop)
        #expect(hits.count == 2)
        #expect(hits.contains(8))
        #expect(hits.contains(20))
    }
}

View File

@@ -0,0 +1,201 @@
// PatchComparisonTests.swift — Compare Swift patcher output against Python reference patches.
//
// Loads firmware binaries (pre-extracted raw payloads), runs Swift patchers,
// and verifies byte-exact match with the pre-generated Python reference JSON files.
@testable import FirmwarePatcher
import Foundation
import Testing
// MARK: - Reference patch JSON format
// Mirrors one entry of the Python-generated reference JSON. Property names are
// deliberately snake_case so they match the JSON keys directly — no
// keyDecodingStrategy or CodingKeys needed, and callers compare fields by these
// exact names.
private struct ReferencePatch: Decodable {
    let file_offset: Int    // offset of the patch within the raw payload
    let patch_bytes: String // hex string of the patched bytes
    let patch_size: Int     // byte count of the patch
    let description: String // human-readable patch description from Python
    let component: String   // firmware component the patch belongs to
}
// txm_dev.json splits its patches into a shared "base" set and dev-only extras;
// the dev comparison uses the concatenation of both lists.
private struct TXMDevReference: Decodable {
    let base: [ReferencePatch]
    let dev: [ReferencePatch]
}
// MARK: - Test helpers
// Root of the test fixture tree: <repo>/ipsws/patch_refactor_input, resolved
// relative to this source file (three path components up from #filePath).
private let baseDir = URL(fileURLWithPath: #filePath)
    .deletingLastPathComponent()
    .deletingLastPathComponent()
    .deletingLastPathComponent()
    .appendingPathComponent("ipsws/patch_refactor_input")
/// Reads a pre-extracted raw firmware payload from `raw_payloads/` under the fixture root.
/// - Parameter name: file name of the payload, e.g. `"ibss.bin"`.
/// - Throws: any `Data(contentsOf:)` error if the fixture is missing or unreadable.
private func loadRawPayload(_ name: String) throws -> Data {
    try Data(contentsOf: baseDir.appendingPathComponent("raw_payloads/\(name)"))
}
/// Decodes the Python-generated reference patch list `reference_patches/<name>.json`.
/// - Parameter name: base name of the reference file (no extension).
/// - Throws: file-read or JSON-decoding errors.
private func loadReference(_ name: String) throws -> [ReferencePatch] {
    let url = baseDir.appendingPathComponent("reference_patches/\(name).json")
    return try JSONDecoder().decode([ReferencePatch].self, from: Data(contentsOf: url))
}
/// Asserts that the Swift patcher output matches the Python reference byte-for-byte.
///
/// Both lists are sorted by file offset, then compared pairwise on offset and
/// patched bytes. Diagnostic dumps go to stdout on any mismatch so CI logs show
/// the full divergence; per-patch `#expect` calls record the actual failures.
/// - Parameters:
///   - swiftPatches: records produced by the Swift patcher under test.
///   - refPatches: records decoded from the pre-generated Python JSON.
///   - component: short component name used to label diagnostics.
private func comparePatchRecords(
    swift swiftPatches: [PatchRecord],
    reference refPatches: [ReferencePatch],
    component: String
) {
    // Sort both by file_offset for stable comparison
    let sortedSwift = swiftPatches.sorted { $0.fileOffset < $1.fileOffset }
    let sortedRef = refPatches.sorted { $0.file_offset < $1.file_offset }
    // Compare counts; on disagreement dump both sides before recording the failure.
    if swiftPatches.count != refPatches.count {
        print("\(component): patch count mismatch — Swift=\(swiftPatches.count), Python=\(refPatches.count)")
        // Show what Swift found
        print(" Swift patches:")
        for p in sortedSwift {
            print(" 0x\(String(format: "%06X", p.fileOffset)) \(p.patchedBytes.hex) [\(p.patchID)]")
        }
        print(" Python patches:")
        for p in sortedRef {
            print(" 0x\(String(format: "%06X", p.file_offset)) \(p.patch_bytes) [\(p.description)]")
        }
    }
    #expect(swiftPatches.count == refPatches.count,
            "\(component): patch count mismatch — Swift=\(swiftPatches.count), Python=\(refPatches.count)")
    // Pairwise compare only up to the shorter list so index access stays in bounds
    // even when the counts differ (the count #expect above already flagged that).
    let count = min(sortedSwift.count, sortedRef.count)
    var mismatches = 0
    for i in 0 ..< count {
        let s = sortedSwift[i]
        let r = sortedRef[i]
        let swiftHex = s.patchedBytes.hex
        // Print a compact one-line diff in addition to the #expect failure message.
        if s.fileOffset != r.file_offset || swiftHex != r.patch_bytes {
            mismatches += 1
            print("\(component) patch \(i): Swift=0x\(String(format: "%X", s.fileOffset)):\(swiftHex) vs Python=0x\(String(format: "%X", r.file_offset)):\(r.patch_bytes) [\(r.description)]")
        }
        #expect(s.fileOffset == r.file_offset,
                "\(component) patch \(i): offset mismatch — Swift=0x\(String(format: "%X", s.fileOffset)), Python=0x\(String(format: "%X", r.file_offset)) [\(r.description)]")
        #expect(swiftHex == r.patch_bytes,
                "\(component) patch \(i) @ 0x\(String(format: "%X", s.fileOffset)): bytes mismatch — Swift=\(swiftHex), Python=\(r.patch_bytes) [\(r.description)]")
    }
    // Success summary only when everything above matched.
    if mismatches == 0, swiftPatches.count == refPatches.count {
        print("\(component): all \(count) patches match exactly")
    }
}
// MARK: - AVPBooter Tests
struct AVPBooterComparisonTests {
    /// Swift AVPBooter patches must byte-exactly match the Python reference.
    @Test func compareAVPBooter() throws {
        let payload = try loadRawPayload("avpbooter.bin")
        let found = try AVPBooterPatcher(data: payload, verbose: false).findAll()
        let expected = try loadReference("avpbooter")
        comparePatchRecords(swift: found, reference: expected, component: "avpbooter")
    }
}
// MARK: - iBoot Tests
struct IBSSComparisonTests {
    /// Swift iBSS patches must byte-exactly match the Python reference.
    @Test func compareIBSS() throws {
        let payload = try loadRawPayload("ibss.bin")
        let found = try IBootPatcher(data: payload, mode: .ibss, verbose: false).findAll()
        let expected = try loadReference("ibss")
        comparePatchRecords(swift: found, reference: expected, component: "ibss")
    }
}
struct IBECComparisonTests {
    /// Swift iBEC patches must byte-exactly match the Python reference.
    @Test func compareIBEC() throws {
        let payload = try loadRawPayload("ibec.bin")
        let found = try IBootPatcher(data: payload, mode: .ibec, verbose: false).findAll()
        let expected = try loadReference("ibec")
        comparePatchRecords(swift: found, reference: expected, component: "ibec")
    }
}
struct LLBComparisonTests {
    /// Swift LLB patches must byte-exactly match the Python reference.
    @Test func compareLLB() throws {
        let payload = try loadRawPayload("llb.bin")
        let found = try IBootPatcher(data: payload, mode: .llb, verbose: false).findAll()
        let expected = try loadReference("llb")
        comparePatchRecords(swift: found, reference: expected, component: "llb")
    }
}
// MARK: - TXM Tests
struct TXMComparisonTests {
    /// Swift TXM patches must byte-exactly match the Python reference.
    @Test func compareTXM() throws {
        let payload = try loadRawPayload("txm.bin")
        let found = try TXMPatcher(data: payload, verbose: false).findAll()
        let expected = try loadReference("txm")
        comparePatchRecords(swift: found, reference: expected, component: "txm")
    }
}
struct TXMDevComparisonTests {
    /// TXM dev-mode output must match the union of the reference's base and dev lists.
    @Test func compareTXMDev() throws {
        // txm_dev.json has a two-part shape, so decode it directly rather than via loadReference.
        let refURL = baseDir.appendingPathComponent("reference_patches/txm_dev.json")
        let ref = try JSONDecoder().decode(TXMDevReference.self, from: Data(contentsOf: refURL))
        let payload = try loadRawPayload("txm.bin")
        let found = try TXMDevPatcher(data: payload, verbose: false).findAll()
        // The dev patcher emits the base patches plus the dev-only patches.
        comparePatchRecords(swift: found, reference: ref.base + ref.dev, component: "txm_dev")
    }
}
// MARK: - Kernel Tests
struct KernelcacheComparisonTests {
    /// Swift kernelcache patches must byte-exactly match the Python reference.
    @Test func compareKernelcache() throws {
        let payload = try loadRawPayload("kernelcache.bin")
        let found = try KernelPatcher(data: payload, verbose: false).findAll()
        let expected = try loadReference("kernelcache")
        comparePatchRecords(swift: found, reference: expected, component: "kernelcache")
    }
}
// MARK: - JB Tests
struct IBSSJBComparisonTests {
    /// Compares only the JB-specific iBSS patch against the `ibss_jb` reference.
    @Test func compareIBSSJB() throws {
        let data = try loadRawPayload("ibss.bin")
        let patcher = IBootJBPatcher(data: data, mode: .ibss, verbose: false)
        // Clear any accumulated records and run only the JB-specific patch, so the
        // comparison covers patchSkipGenerateNonce() in isolation.
        // NOTE(review): an earlier comment claimed findAll() (base patches) should
        // run first, but the code never calls it — confirm the ibss_jb reference
        // JSON contains only the JB patch and not the base set.
        patcher.patches = []
        patcher.patchSkipGenerateNonce()
        let refPatches = try loadReference("ibss_jb")
        comparePatchRecords(swift: patcher.patches, reference: refPatches, component: "ibss_jb")
    }
}
struct KernelcacheJBComparisonTests {
    /// Swift kernelcache JB patches must byte-exactly match the Python reference.
    @Test func compareKernelcacheJB() throws {
        let payload = try loadRawPayload("kernelcache.bin")
        let found = try KernelJBPatcher(data: payload, verbose: false).findAll()
        let expected = try loadReference("kernelcache_jb")
        comparePatchRecords(swift: found, reference: expected, component: "kernelcache_jb")
    }
}

View File

@@ -0,0 +1,37 @@
@testable import FirmwarePatcher
import Foundation
import Testing

// Developer aid, not a regression test: runs the four historically-flaky kernel
// JB patches one at a time with verbose logging so their search/match output can
// be inspected individually. It asserts nothing — it only prints results.
struct VerboseJBDebug {
    @Test func debugFailingPatches() throws {
        // Same fixture root as PatchComparisonTests: <repo>/ipsws/patch_refactor_input.
        let baseDir = URL(fileURLWithPath: #filePath)
            .deletingLastPathComponent()
            .deletingLastPathComponent()
            .deletingLastPathComponent()
            .appendingPathComponent("ipsws/patch_refactor_input")
        let data = try Data(contentsOf: baseDir.appendingPathComponent("raw_payloads/kernelcache.bin"))
        let patcher = KernelJBPatcher(data: data, verbose: true)
        // Initialize patcher state (same as findAll() but without running patches)
        try patcher.parseMachO()
        patcher.buildADRPIndex()
        patcher.buildBLIndex()
        patcher.buildSymbolTable()
        // Run each suspect patch separately and print its outcome.
        print("=== HOOK CRED LABEL ===")
        let r1 = patcher.patchHookCredLabelUpdateExecve()
        print("Result: \(r1)")
        print("\n=== TASK CONVERSION ===")
        let r2 = patcher.patchTaskConversionEvalInternal()
        print("Result: \(r2)")
        print("\n=== BSD INIT AUTH ===")
        let r3 = patcher.patchBsdInitAuth()
        print("Result: \(r3)")
        print("\n=== IOUC MACF ===")
        let r4 = patcher.patchIoucFailedMacf()
        print("Result: \(r4)")
    }
}

1
vendor/Dynamic vendored Submodule

Submodule vendor/Dynamic added at 772883073d

1
vendor/MachOKit vendored Submodule

Submodule vendor/MachOKit added at eb30537f9b

1
vendor/libcapstone-spm vendored Submodule

Submodule vendor/libcapstone-spm added at ea98aa0a31

1
vendor/libimg4-spm vendored Submodule

Submodule vendor/libimg4-spm added at 1ed44d4b37

1
vendor/swift-argument-parser vendored Submodule