Squash merge JB kernel runtime optimizations and testing workflow

This commit is contained in:
Lakr
2026-03-05 00:33:18 +08:00
parent 89f0f698af
commit fdc3e28ae3
20 changed files with 1574 additions and 277 deletions

View File

@@ -8,6 +8,7 @@ CPU ?= 8
MEMORY ?= 8192 MEMORY ?= 8192
DISK_SIZE ?= 64 DISK_SIZE ?= 64
CFW_INPUT ?= cfw_input CFW_INPUT ?= cfw_input
BASE_PATCH ?=
# ─── Build info ────────────────────────────────────────────────── # ─── Build info ──────────────────────────────────────────────────
GIT_HASH := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") GIT_HASH := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown")
@@ -71,13 +72,18 @@ help:
@echo "Ramdisk:" @echo "Ramdisk:"
@echo " make ramdisk_build Build signed SSH ramdisk" @echo " make ramdisk_build Build signed SSH ramdisk"
@echo " make ramdisk_send Send ramdisk to device" @echo " make ramdisk_send Send ramdisk to device"
@echo " make testing_ramdisk_build Build testing boot chain (no SSH, no CFW)"
@echo " make testing_ramdisk_send Send testing boot chain to device"
@echo " make testing_checkpoint_save Save kernel checkpoint for patch testing"
@echo " Options: BASE_PATCH=normal|dev|jb"
@echo " make testing_exec Quick test flow (prepare -> patch_jb -> build/send -> boot_dfu)"
@echo "" @echo ""
@echo "CFW:" @echo "CFW:"
@echo " make cfw_install Install CFW mods via SSH" @echo " make cfw_install Install CFW mods via SSH"
@echo " make cfw_install_dev Install CFW mods via SSH (dev mode)" @echo " make cfw_install_dev Install CFW mods via SSH (dev mode)"
@echo " make cfw_install_jb Install CFW + JB extensions (jetsam/procursus/basebin)" @echo " make cfw_install_jb Install CFW + JB extensions (jetsam/procursus/basebin)"
@echo "" @echo ""
@echo "Variables: VM_DIR=$(VM_DIR) CPU=$(CPU) MEMORY=$(MEMORY) DISK_SIZE=$(DISK_SIZE)" @echo "Variables: VM_DIR=$(VM_DIR) CPU=$(CPU) MEMORY=$(MEMORY) DISK_SIZE=$(DISK_SIZE) BASE_PATCH=$(if $(BASE_PATCH),$(BASE_PATCH),jb)"
# ═══════════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════════
# Setup # Setup
@@ -217,7 +223,7 @@ restore:
# Ramdisk # Ramdisk
# ═══════════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════════
.PHONY: ramdisk_build ramdisk_send .PHONY: ramdisk_build ramdisk_send testing_ramdisk_build testing_ramdisk_send testing_checkpoint_save testing_exec testing_kernel_patch testing_c23_bisect
ramdisk_build: ramdisk_build:
cd $(VM_DIR) && $(PYTHON) "$(CURDIR)/$(SCRIPTS)/ramdisk_build.py" . cd $(VM_DIR) && $(PYTHON) "$(CURDIR)/$(SCRIPTS)/ramdisk_build.py" .
@@ -225,6 +231,30 @@ ramdisk_build:
ramdisk_send: ramdisk_send:
cd $(VM_DIR) && IRECOVERY="$(CURDIR)/$(IRECOVERY)" zsh "$(CURDIR)/$(SCRIPTS)/ramdisk_send.sh" cd $(VM_DIR) && IRECOVERY="$(CURDIR)/$(IRECOVERY)" zsh "$(CURDIR)/$(SCRIPTS)/ramdisk_send.sh"
testing_ramdisk_build:
cd $(VM_DIR) && $(PYTHON) "$(CURDIR)/$(SCRIPTS)/testing_ramdisk_build.py" .
testing_ramdisk_send:
cd $(VM_DIR) && IRECOVERY="$(CURDIR)/$(IRECOVERY)" zsh "$(CURDIR)/$(SCRIPTS)/testing_ramdisk_send.sh"
testing_checkpoint_save:
VM_DIR="$(VM_DIR)" BASE_PATCH="$(if $(BASE_PATCH),$(BASE_PATCH),jb)" zsh "$(CURDIR)/$(SCRIPTS)/testing_checkpoint_save.sh"
testing_exec:
VM_DIR="$(VM_DIR)" zsh "$(CURDIR)/$(SCRIPTS)/testing_exec.sh"
testing_kernel_patch:
@if [ -z "$(strip $(or $(PATCHES),$(PATCH)))" ]; then \
echo "Error: PATCH or PATCHES is required"; \
echo " Example: make testing_kernel_patch PATCH=patch_kcall10"; \
echo " Example: make testing_kernel_patch PATCHES='patch_a patch_b'"; \
exit 1; \
fi
cd $(VM_DIR) && BASE_PATCH="$(if $(BASE_PATCH),$(BASE_PATCH),jb)" $(PYTHON) "$(CURDIR)/$(SCRIPTS)/testing_kernel_patch.py" . --base-patch "$(if $(BASE_PATCH),$(BASE_PATCH),jb)" $(or $(PATCHES),$(PATCH))
testing_c23_bisect:
cd $(VM_DIR) && $(PYTHON) "$(CURDIR)/$(SCRIPTS)/testing_c23_bisect.py" . $(VARIANT)
# ═══════════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════════
# CFW # CFW
# ═══════════════════════════════════════════════════════════════════ # ═══════════════════════════════════════════════════════════════════

View File

@@ -17,7 +17,31 @@ From `make boot` serial log:
- Ignition/boot path shows entitlement-like failure: - Ignition/boot path shows entitlement-like failure:
- `handle_get_dev_by_role:13101: disk1s1 This operation needs entitlement` - `handle_get_dev_by_role:13101: disk1s1 This operation needs entitlement`
This is consistent with missing mount-policy bypasses in the running kernel. This indicates failure in APFS role-based device lookup during early boot mount tasks.
## Runtime Evidence (DEV Control Run, 2026-03-04)
From a separate `fw_patch_dev + cfw_install_dev` boot log (not JB):
- `mount-phase-1` succeeded for xART:
- `disk1s3 mount-complete volume xART`
- `/dev/disk1s3 on /private/xarts ...`
- launch progressed to:
- `data-protection`
- `finish-obliteration`
- `detect-installed-roots`
- `mount-phase-2`
Interpretation: APFS boot-mount path can work on this build/kernel family after recent APFS gate changes.
This does **not** prove JB flow is fixed; it is a control signal showing the kernel-side path is not universally broken.
## Flow Separation (Critical)
- The successful `xART mount-complete` / `mount-phase-2` log is from DEV pipeline:
- `fw_patch_dev` + `cfw_install_dev`
- JB pipeline remains:
- `fw_patch_jb` + `cfw_install_jb`
- `cfw_install_jb` does **not** call `cfw_install_dev`; it runs base `cfw_install.sh` first, then JB-only phases.
## Kernel Artifact Checks ## Kernel Artifact Checks
@@ -51,34 +75,129 @@ Interpretation: kernel is base-patched, but critical JB mount/syscall extensions
So restore kernel is modified vs source, but not fully JB-complete. So restore kernel is modified vs source, but not fully JB-complete.
## Root Cause (Current Working Hypothesis) ## IDA Deep-Dive (APFS mount-phase-1 path)
- The kernel used for install/boot is not fully JB-patched. ### 1) Failing function identified
- Missing JB mount-related patches (`___mac_mount`, `_dounmount`) explain:
- remount failure in ramdisk CFW stage - APFS function: `sub_FFFFFE000948EB10` (log name: `handle_get_dev_by_role`)
- mount-phase-1 failures and panic during normal boot. - Trigger string in function:
- `"%s:%d: %s This operation needs entitlement\n"` (line 13101)
- Caller xref:
- `sub_FFFFFE000947CFE4` dispatches to `sub_FFFFFE000948EB10`
### 2) Gate logic at failure site
The deny path is reached if either check fails:
- Context gate:
- `BL sub_FFFFFE0007CCB994`
- `CBZ X0, deny`
- "Entitlement" gate (APFS role lookup privilege gate):
- `ADRL X1, "com.apple.apfs.get-dev-by-role"`
- `BL sub_FFFFFE000940CFC8`
- `CBZ W0, deny`
- Secondary role-path gate (role == 2 volume-group path):
- `BL sub_FFFFFE000817C240`
- `CBZ W0, deny` (to line 13115 block)
The deny block logs line `13101` and returns failure.
### 3) Patch sites (current vphone600 kernelcache)
- File offsets:
- `0x0248AB50` — context gate branch (`CBZ X0, deny`)
- `0x0248AB64` — role-lookup privilege gate (`CBZ W0, deny`)
- `0x0248AC24` — secondary role==2 deny branch (`CBZ W0, deny`)
- All three patched to `NOP` in the additive APFS patch.
### 4) Additional APFS EPERM(1) return paths in `apfs_vfsop_mount`
Function:
- `sub_FFFFFE0009478848` (`apfs_vfsop_mount`)
Observed EPERM-relevant deny blocks:
- Root-mount privilege deny:
- log string: `"%s:%d: not allowed to mount as root\n"`
- xref site: `0xFFFFFE000947905C`
- error return: sets `W25 = 1`
- Verification-mount privilege deny:
- log string: `"%s:%d: not allowed to do a verification mount of %s (is_suser %s ; uid %d)\n"`
- xref site: `0xFFFFFE0009479CA0`
- error return: sets `W25 = 1`
Important relation to existing Patch 13:
- At `0xFFFFFE0009479044` (same function), current code is `CMP X0, X0` (patched form),
which forces the following `B.EQ` path and should bypass one root privilege check in this region.
- Therefore, if JB still reports `mount_apfs ... Operation not permitted`, remaining EPERM candidates
include other deny branches (including the verification-mount gate path above), not only `handle_get_dev_by_role`.
## Root Cause (Updated, Two-Stage)
Stage 1 (confirmed and mitigated):
- APFS `handle_get_dev_by_role` entitlement/role deny gates were a concrete mount-phase-1 blocker.
- Additive patch now NOPs all three relevant deny branches.
Stage 2 (still under investigation, JB-only):
- DEV control run can pass `mount-phase-1`/`mount-phase-2`.
- JB failures must be analyzed with JB-only artifacts/logs and likely involve JB-only deltas
(launchd dylib injection, BaseBin hooks, or JB preboot/bootstrap interaction), in addition to any remaining kernel checks.
## Mitigation Implemented ## Mitigation Implemented
To reduce install fragility while preserving a JB target kernel: ### A) Ramdisk kernel split (updated implementation)
- `scripts/fw_patch_jb.py` - `scripts/fw_patch_jb.py`
- saves a pre-JB base/dev snapshot: - no longer creates a ramdisk snapshot file
- `kernelcache.research.vphone600.ramdisk`
- `scripts/ramdisk_build.py` - `scripts/ramdisk_build.py`
- derives ramdisk kernel source internally:
- uses legacy `kernelcache.research.vphone600.ramdisk` if present
- otherwise derives from pristine CloudOS `kernelcache.research.vphone600`
under `ipsws/*CloudOS*/` using base `KernelPatcher`
- builds: - builds:
- `Ramdisk/krnl.ramdisk.img4` from the snapshot - `Ramdisk/krnl.ramdisk.img4` from derived/base source
- `Ramdisk/krnl.img4` from post-JB kernel - `Ramdisk/krnl.img4` from post-JB restore kernel
- `scripts/ramdisk_send.sh` - `scripts/ramdisk_send.sh`
- prefers `krnl.ramdisk.img4` when present. - prefers `krnl.ramdisk.img4` when present.
### B) Additive APFS boot-mount gate bypass (new)
- Added new base kernel patch method:
- `KernelPatchApfsMountMixin.patch_apfs_get_dev_by_role_entitlement()`
- Added to base kernel patch sequence in `scripts/patchers/kernel.py`.
- Behavior:
- NOPs three deny branches in `handle_get_dev_by_role`
- does not modify existing filesystem patches (APFS snapshot/seal/graft/mount/sandbox hooks remain unchanged).
### C) JB-only differential identified (for next isolation)
Compared with DEV flow, JB adds unique early-boot risk factors:
- launchd binary gets `LC_LOAD_DYLIB` injection for `/cores/launchdhook.dylib`
- `launchdhook.dylib`/BaseBin environment strings include:
- `JB_ROOT_PATH`
- `JB_TWEAKLOADER_PATH`
- explicit launchdhook startup logs (`hello` / `bye`)
- procursus/bootstrap content is written under preboot hash path (`/mnt5/<hash>/jb-vphone`)
These do not prove causality yet, but they are the primary JB-only candidates after Stage-1 APFS gate mitigation.
## Next Validation ## Next Validation
1. Re-run firmware patch and ramdisk build on the current tree: 1. Kernel/JB isolation run (requested):
- `make fw_patch_jb` - `make fw_patch_jb`
- `make ramdisk_build` - `make ramdisk_build`
- `make ramdisk_send` - `make ramdisk_send`
- `make cfw_install_jb` - run `cfw_install_dev` (not JB) on this JB-patched firmware baseline
2. Verify remount succeeds in JB stage: 2. Compare normal boot result:
- `/dev/disk1s1 -> /mnt1` - If `mount-phase-1/2` succeeds: strong evidence issue is in JB-only userspace phases.
3. Re-test normal boot and confirm no `mount-phase-1 exit(77)` panic. - If it still fails with `EPERM`: continue kernel/APFS deny-path tracing.
3. If step 2 succeeds, add back JB phases incrementally:
- first JB-1 (launchd inject + jetsam patch)
- then JB-2 (preboot bootstrap)
- then JB-3 (BaseBin hooks)
and capture first regression point.

View File

@@ -43,6 +43,7 @@ Three firmware variants are available, each building on the previous:
### TXM ### TXM
TXM patch composition by variant: TXM patch composition by variant:
- Regular: `txm.py` (1 patch). - Regular: `txm.py` (1 patch).
- Dev: `txm.py` (1 patch) + `txm_dev.py` (11 patches) = 12 total. - Dev: `txm.py` (1 patch) + `txm_dev.py` (11 patches) = 12 total.
- JB: same as Dev (selector24 bypass now in `txm_dev.py`, no separate JB patcher). - JB: same as Dev (selector24 bypass now in `txm_dev.py`, no separate JB patcher).
@@ -64,25 +65,26 @@ TXM patch composition by variant:
### Kernelcache ### Kernelcache
Regular and Dev share the same 25 base kernel patches. JB adds 34 additional patches. Regular and Dev share the same 28 base kernel patches. JB adds 34 additional patches.
#### Base patches (all variants) #### Base patches (all variants)
| # | Patch | Function | Purpose | Regular | Dev | JB | | # | Patch | Function | Purpose | Regular | Dev | JB |
| ----- | -------------------------- | -------------------------------- | ---------------------------------------- | :-----: | :-: | :-: | | ----- | -------------------------- | -------------------------------- | ----------------------------------------------------------------------------------------- | :-----: | :-: | :-: |
| 1 | NOP `tbnz w8,#5` | `_apfs_vfsop_mount` | Skip "root snapshot" sealed volume check | Y | Y | Y | | 1 | NOP `tbnz w8,#5` | `_apfs_vfsop_mount` | Skip "root snapshot" sealed volume check | Y | Y | Y |
| 2 | NOP conditional | `_authapfs_seal_is_broken` | Skip "root volume seal" panic | Y | Y | Y | | 2 | NOP conditional | `_authapfs_seal_is_broken` | Skip "root volume seal" panic | Y | Y | Y |
| 3 | NOP conditional | `_bsd_init` | Skip "rootvp not authenticated" panic | Y | Y | Y | | 3 | NOP conditional | `_bsd_init` | Skip "rootvp not authenticated" panic | Y | Y | Y |
| 4–5 | `mov w0,#0; ret` | `_proc_check_launch_constraints` | Bypass launch constraints | Y | Y | Y | | 4–5 | `mov w0,#0; ret` | `_proc_check_launch_constraints` | Bypass launch constraints | Y | Y | Y |
| 6–7 | `mov x0,#1` (2x) | `PE_i_can_has_debugger` | Enable kernel debugger | Y | Y | Y | | 6–7 | `mov x0,#1` (2x) | `PE_i_can_has_debugger` | Enable kernel debugger | Y | Y | Y |
| 8 | NOP | `_postValidation` | Skip AMFI post-validation | Y | Y | Y | | 8 | NOP | `_postValidation` | Skip AMFI post-validation | Y | Y | Y |
| 9 | `cmp w0,w0` | `_postValidation` | Force comparison true | Y | Y | Y | | 9 | `cmp w0,w0` | `_postValidation` | Force comparison true | Y | Y | Y |
| 10–11 | `mov w0,#1` (2x) | `_check_dyld_policy_internal` | Allow dyld loading | Y | Y | Y | | 10–11 | `mov w0,#1` (2x) | `_check_dyld_policy_internal` | Allow dyld loading | Y | Y | Y |
| 12 | `mov w0,#0` | `_apfs_graft` | Allow APFS graft | Y | Y | Y | | 12 | `mov w0,#0` | `_apfs_graft` | Allow APFS graft | Y | Y | Y |
| 13 | `cmp x0,x0` | `_apfs_vfsop_mount` | Skip mount check | Y | Y | Y | | 13 | `cmp x0,x0` | `_apfs_vfsop_mount` | Skip mount check | Y | Y | Y |
| 14 | `mov w0,#0` | `_apfs_mount_upgrade_checks` | Allow mount upgrade | Y | Y | Y | | 14 | `mov w0,#0` | `_apfs_mount_upgrade_checks` | Allow mount upgrade | Y | Y | Y |
| 15 | `mov w0,#0` | `_handle_fsioc_graft` | Allow fsioc graft | Y | Y | Y | | 15 | `mov w0,#0` | `_handle_fsioc_graft` | Allow fsioc graft | Y | Y | Y |
| 16–25 | `mov x0,#0; ret` (5 hooks) | Sandbox MACF ops table | Stub 5 sandbox hooks | Y | Y | Y | | 16 | `NOP` (3x) | `handle_get_dev_by_role` | Bypass APFS role-lookup deny gates for boot mounts (context + entitlement + role==2 path) | Y | Y | Y |
| 17–26 | `mov x0,#0; ret` (5 hooks) | Sandbox MACF ops table | Stub 5 sandbox hooks | Y | Y | Y |
#### JB-only kernel patches #### JB-only kernel patches
@@ -138,23 +140,39 @@ Regular and Dev share the same 25 base kernel patches. JB adds 34 additional pat
| 7 | Procursus bootstrap | Bootstrap filesystem + optional Sileo deb | — | — | Y | | 7 | Procursus bootstrap | Bootstrap filesystem + optional Sileo deb | — | — | Y |
| 8 | BaseBin hooks | systemhook.dylib, launchdhook.dylib, libellekit.dylib → `/cores/` | — | — | Y | | 8 | BaseBin hooks | systemhook.dylib, launchdhook.dylib, libellekit.dylib → `/cores/` | — | — | Y |
### CFW Installer Flow Matrix (Script-Level)
| Flow item | Regular (`cfw_install.sh`) | Dev (`cfw_install_dev.sh`) | JB (`cfw_install_jb.sh`) |
| --- | --- | --- | --- |
| Base CFW phases (1/7 → 7/7) | Runs directly | Runs directly | Runs via `CFW_SKIP_HALT=1 zsh cfw_install.sh` |
| Dev overlay (`rpcserver_ios` replacement in `iosbinpack64.tar`) | — | Y (`apply_dev_overlay`) | — |
| SSH readiness wait before install | Y (`wait_for_device_ssh_ready`) | — | Y (inherited from base run) |
| `remote_mount` behavior | Ensures mountpoint and verifies mount success (hard fail) | Best-effort mount only (`mount_apfs ... || true`) | Ensures mountpoint and verifies mount success (hard fail) |
| launchd jetsam patch (`patch-launchd-jetsam`) | — | Y (base-flow injection) | Y (JB-1) |
| launchd dylib injection (`inject-dylib /cores/launchdhook.dylib`) | — | — | Y (JB-1) |
| Procursus bootstrap deployment (`/mnt5/<bootHash>/jb-vphone/procursus`) | — | — | Y (JB-2) |
| BaseBin hook deployment (`*.dylib` → `/mnt1/cores`) | — | — | Y (JB-3) |
| Additional input resources | `cfw_input` | `cfw_input` + `resources/cfw_dev/rpcserver_ios` | `cfw_input` + `cfw_jb_input` |
| Extra tool requirement beyond base | — | — | `zstd` |
| Halt behavior | Halts unless `CFW_SKIP_HALT=1` | Halts unless `CFW_SKIP_HALT=1` | Always halts after JB phases |
## Summary ## Summary
| Component | Regular | Dev | JB | | Component | Regular | Dev | JB |
| ------------------------ | :-----: | :----: | :----: | | ------------------------ | :-----: | :----: | :-----: |
| AVPBooter | 1 | 1 | 1 | | AVPBooter | 1 | 1 | 1 |
| iBSS | 2 | 2 | 3 | | iBSS | 2 | 2 | 3 |
| iBEC | 3 | 3 | 3 | | iBEC | 3 | 3 | 3 |
| LLB | 6 | 6 | 6 | | LLB | 6 | 6 | 6 |
| TXM | 1 | 12 | 12 | | TXM | 1 | 12 | 12 |
| Kernel | 25 | 25 | 59 | | Kernel | 28 | 28 | 62 |
| **Boot chain total** | **38** | **49** | **84** | | **Boot chain total** | **41** | **52** | **87** |
| | | | | | | | | |
| CFW binary patches | 4 | 5 | 6 | | CFW binary patches | 4 | 5 | 6 |
| CFW installed components | 6 | 7 | 8 | | CFW installed components | 6 | 7 | 8 |
| **CFW total** | **10** | **12** | **14** | | **CFW total** | **10** | **12** | **14** |
| | | | | | | | | |
| **Grand total** | **48** | **61** | **98** | | **Grand total** | **51** | **64** | **101** |
### What each variant adds ### What each variant adds
@@ -184,18 +202,25 @@ Regular and Dev share the same 25 base kernel patches. JB adds 34 additional pat
- `jb/org.coolstar.sileo_2.5.1_iphoneos-arm64.deb` - `jb/org.coolstar.sileo_2.5.1_iphoneos-arm64.deb`
- `basebin/*.dylib` (BaseBin hooks for JB-3) - `basebin/*.dylib` (BaseBin hooks for JB-3)
## Ramdisk Kernel Split (JB mode) ## Ramdisk Variant Matrix (`make ramdisk_build`)
- `scripts/fw_patch_jb.py` now snapshots the base/dev-patched kernel before JB kernel extensions: Why `ramdisk_build` still prints patch logs:
- `iPhone*_Restore/kernelcache.research.vphone600.ramdisk`
- `scripts/ramdisk_build.py` uses that snapshot to build: - Step 6 patches `Firmware/txm.iphoneos.release.im4p` via `patch_txm()` (1 trustcache-bypass patch), then signs `Ramdisk/txm.img4`.
- `Ramdisk/krnl.ramdisk.img4` (base/dev kernel for SSH ramdisk boot + CFW install) - Step 7 may derive `kernelcache.research.vphone600.ramdisk` from pristine CloudOS and apply base `KernelPatcher` (28 patches), then signs `Ramdisk/krnl.ramdisk.img4`.
- `Ramdisk/krnl.img4` (post-JB kernel, unchanged restore target) - Step 7 also always signs restore kernel as `Ramdisk/krnl.img4`.
- `scripts/ramdisk_send.sh` now prefers `krnl.ramdisk.img4` when present, otherwise falls back to `krnl.img4`.
- Intent: keep restore kernel fully JB-patched while booting the installer ramdisk with a | Variant | Pre-step before `make ramdisk_build` | `Ramdisk/txm.img4` | `Ramdisk/krnl.ramdisk.img4` | `Ramdisk/krnl.img4` | Effective kernel used by `ramdisk_send.sh` |
more conservative kernel variant to improve `/dev/disk1s1` remount reliability. | ------------- | ------------------------------------ | -------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------- | ----------------------------------------------------- |
- Investigation details and runtime evidence: | `RAMDISK` | `make fw_patch` | release TXM + base TXM patch (1) | base kernel (28): use legacy `*.ramdisk` if present, else derive from pristine CloudOS | restore kernel from `fw_patch` (28) | `krnl.ramdisk.img4` (preferred), fallback `krnl.img4` |
- `research/jb_mount_failure_investigation_2026-03-04.md` | `DEV+RAMDISK` | `make fw_patch_dev` | release TXM + base TXM patch (1) | base kernel (28): same derivation rule as above | restore kernel from `fw_patch_dev` (28) | `krnl.ramdisk.img4` (preferred), fallback `krnl.img4` |
| `JB+RAMDISK` | `make fw_patch_jb` | release TXM + base TXM patch (1) | base kernel (28): same derivation rule as above | restore kernel from `fw_patch_jb` (62 = 28 base + 34 JB) | `krnl.ramdisk.img4` (preferred), fallback `krnl.img4` |
Notes:
- `scripts/fw_patch_jb.py` no longer creates a ramdisk snapshot file directly.
- Intent: keep ramdisk boot on a conservative base kernel while preserving full patched restore kernel for later JB flow.
- Investigation details and runtime evidence: `research/jb_mount_failure_investigation_2026-03-04.md`
## Dynamic Implementation Log (JB Patchers) ## Dynamic Implementation Log (JB Patchers)
@@ -270,6 +295,12 @@ with capstone semantic matching and keystone-generated patch bytes only:
5. `_postValidation` additional CMP bypass 5. `_postValidation` additional CMP bypass
6. `_proc_security_policy` stub (mov x0,#0; ret) — FIXED: was patching copyio instead 6. `_proc_security_policy` stub (mov x0,#0; ret) — FIXED: was patching copyio instead
- Runtime optimization (2026-03-05): locator switched from capstone full-text scan to
raw instruction-mask matching (`sub wN,wM,#1 ; cmp wN,#0x21`, strict W-form) + raw
BL decode in `_proc_info` body; shared `_proc_info` anchor scan cache reused by
`_proc_pidinfo`.
- JB timing logger readability tweak (2026-03-05): per-method `[T]` and timing summary
now only print slow methods (runtime `>=10s`), patch output/selection unchanged.
7. `_proc_pidinfo` pid-0 guard NOP (2 sites) 7. `_proc_pidinfo` pid-0 guard NOP (2 sites)
8. `_convert_port_to_map_with_flavor` panic skip — FIXED: was patching PAC check instead 8. `_convert_port_to_map_with_flavor` panic skip — FIXED: was patching PAC check instead
9. `_vm_fault_enter_prepare` PMAP check NOP 9. `_vm_fault_enter_prepare` PMAP check NOP
@@ -290,14 +321,14 @@ with capstone semantic matching and keystone-generated patch bytes only:
21. `_cred_label_update_execve` cs_flags shellcode 21. `_cred_label_update_execve` cs_flags shellcode
22. `_syscallmask_apply_to_proc` filter mask shellcode 22. `_syscallmask_apply_to_proc` filter mask shellcode
23. `_hook_cred_label_update_execve` inline trampoline + vnode_getattr shellcode 23. `_hook_cred_label_update_execve` inline trampoline + vnode_getattr shellcode
- Code cave restricted to __TEXT_EXEC only (__PRELINK_TEXT excluded due to KTRR) - Code cave restricted to `__TEXT_EXEC` only (`__PRELINK_TEXT` excluded due to KTRR)
- Inline trampoline (B cave at function entry) replaces ops table pointer rewrite - Inline trampoline (B cave at function entry) replaces ops table pointer rewrite
- Ops table pointer modification breaks chained fixup integrity → PAC failures - Ops table pointer modification breaks chained fixup integrity → PAC failures
24. `kcall10` syscall 439 replacement shellcode 24. `kcall10` syscall 439 replacement shellcode
- Sysent table base found via backward scan from first `_nosys` match (entry 0 is indirect syscall, not `_nosys`) - Sysent table base found via backward scan from first `_nosys` match (entry 0 is indirect syscall, not `_nosys`)
- `sy_call` encoded as auth rebase chained fixup pointer (diversity=0xBCAD, key=IA, addrDiv=0) - `sy_call` encoded as auth rebase chained fixup pointer (diversity=0xBCAD, key=IA, addrDiv=0)
- Matches dispatch's `BLRAA X8, X17` with `X17=0xBCAD` PAC authentication - Matches dispatch's `BLRAA X8, X17` with `X17=0xBCAD` PAC authentication
- Chain `next` field preserved from original entry to maintain fixup chain integrity - Chain `next` field preserved from original entry to maintain fixup chain integrity
## Cross-Version Dynamic Snapshot ## Cross-Version Dynamic Snapshot

View File

@@ -9,7 +9,6 @@ This script extends fw_patch_dev with additional JB-oriented patches.
""" """
import os import os
import shutil
import sys import sys
from fw_patch import ( from fw_patch import (
@@ -26,9 +25,6 @@ from fw_patch_dev import patch_txm_dev
from patchers.iboot_jb import IBootJBPatcher from patchers.iboot_jb import IBootJBPatcher
from patchers.kernel_jb import KernelJBPatcher from patchers.kernel_jb import KernelJBPatcher
RAMDISK_KERNEL_SUFFIX = ".ramdisk"
KERNEL_SEARCH_PATTERNS = ["kernelcache.research.vphone600"]
def patch_ibss_jb(data): def patch_ibss_jb(data):
p = IBootJBPatcher(data, mode="ibss", label="Loaded iBSS") p = IBootJBPatcher(data, mode="ibss", label="Loaded iBSS")
@@ -73,17 +69,6 @@ JB_COMPONENTS = [
True, True,
), ),
] ]
def snapshot_base_kernel_for_ramdisk(restore_dir):
"""Save base/dev-patched kernel before JB extensions for ramdisk boot."""
kernel_path = find_file(restore_dir, KERNEL_SEARCH_PATTERNS, "kernelcache")
ramdisk_kernel_path = f"{kernel_path}{RAMDISK_KERNEL_SUFFIX}"
shutil.copy2(kernel_path, ramdisk_kernel_path)
print(f"[*] Saved ramdisk kernel snapshot: {ramdisk_kernel_path}")
return ramdisk_kernel_path
def main(): def main():
vm_dir = sys.argv[1] if len(sys.argv) > 1 else os.getcwd() vm_dir = sys.argv[1] if len(sys.argv) > 1 else os.getcwd()
vm_dir = os.path.abspath(vm_dir) vm_dir = os.path.abspath(vm_dir)
@@ -106,8 +91,6 @@ def main():
path = find_file(search_base, patterns, name) path = find_file(search_base, patterns, name)
patch_component(path, patch_fn, name, preserve_payp) patch_component(path, patch_fn, name, preserve_payp)
snapshot_base_kernel_for_ramdisk(restore_dir)
if JB_COMPONENTS: if JB_COMPONENTS:
print(f"\n[*] Applying {len(JB_COMPONENTS)} JB extension patches ...") print(f"\n[*] Applying {len(JB_COMPONENTS)} JB extension patches ...")
for name, in_restore, patterns, patch_fn, preserve_payp in JB_COMPONENTS: for name, in_restore, patterns, patch_fn, preserve_payp in JB_COMPONENTS:

View File

@@ -55,8 +55,7 @@ class KernelPatcher(
def find_all(self): def find_all(self):
"""Find and record all kernel patches. Returns list of (offset, bytes, desc).""" """Find and record all kernel patches. Returns list of (offset, bytes, desc)."""
self.patches = [] self._reset_patch_state()
self._patch_num = 0
self.patch_apfs_root_snapshot() # 1 self.patch_apfs_root_snapshot() # 1
self.patch_apfs_seal_broken() # 2 self.patch_apfs_seal_broken() # 2
self.patch_bsd_init_rootvp() # 3 self.patch_bsd_init_rootvp() # 3
@@ -69,7 +68,8 @@ class KernelPatcher(
self.patch_apfs_vfsop_mount_cmp() # 13 self.patch_apfs_vfsop_mount_cmp() # 13
self.patch_apfs_mount_upgrade_checks() # 14 self.patch_apfs_mount_upgrade_checks() # 14
self.patch_handle_fsioc_graft() # 15 self.patch_handle_fsioc_graft() # 15
self.patch_sandbox_hooks() # 16-25 self.patch_apfs_get_dev_by_role_entitlement() # 16
self.patch_sandbox_hooks() # 17-26
return self.patches return self.patches
def apply(self): def apply(self):

View File

@@ -1,6 +1,6 @@
"""Base class with all infrastructure for kernel patchers.""" """Base class with all infrastructure for kernel patchers."""
import struct, plistlib import struct, plistlib, threading
from collections import defaultdict from collections import defaultdict
from capstone.arm64_const import ( from capstone.arm64_const import (
@@ -26,8 +26,16 @@ class KernelPatcherBase:
self.raw = bytes(data) # immutable snapshot for searching self.raw = bytes(data) # immutable snapshot for searching
self.size = len(data) self.size = len(data)
self.patches = [] # collected (offset, bytes, description) self.patches = [] # collected (offset, bytes, description)
self._patch_by_off = {} # offset -> (patch_bytes, desc)
self.verbose = verbose self.verbose = verbose
self._patch_num = 0 # running counter for clean one-liners self._patch_num = 0 # running counter for clean one-liners
self._emit_lock = threading.Lock()
# Hot-path caches (search/disassembly is repeated heavily in JB mode).
self._disas_cache = {}
self._disas_cache_limit = 200_000
self._string_refs_cache = {}
self._func_start_cache = {}
self._log("[*] Parsing Mach-O segments …") self._log("[*] Parsing Mach-O segments …")
self._parse_macho() self._parse_macho()
@@ -52,6 +60,12 @@ class KernelPatcherBase:
if self.verbose: if self.verbose:
print(msg) print(msg)
def _reset_patch_state(self):
"""Reset patch bookkeeping before a fresh find/apply pass."""
self.patches = []
self._patch_by_off = {}
self._patch_num = 0
# ── Mach-O / segment parsing ───────────────────────────────── # ── Mach-O / segment parsing ─────────────────────────────────
def _parse_macho(self): def _parse_macho(self):
"""Parse top-level Mach-O: discover BASE_VA, segments, code ranges.""" """Parse top-level Mach-O: discover BASE_VA, segments, code ranges."""
@@ -314,11 +328,26 @@ class KernelPatcherBase:
# ── Helpers ────────────────────────────────────────────────── # ── Helpers ──────────────────────────────────────────────────
def _disas_at(self, off, count=1): def _disas_at(self, off, count=1):
"""Disassemble *count* instructions at file offset. Returns a list.""" """Disassemble *count* instructions at file offset. Returns a list."""
end = min(off + count * 4, self.size)
if off < 0 or off >= self.size: if off < 0 or off >= self.size:
return [] return []
key = None
if count <= 4:
key = (off, count)
cached = self._disas_cache.get(key)
if cached is not None:
return cached
end = min(off + count * 4, self.size)
code = bytes(self.raw[off:end]) code = bytes(self.raw[off:end])
return list(_cs.disasm(code, off, count)) insns = list(_cs.disasm(code, off, count))
if key is not None:
if len(self._disas_cache) >= self._disas_cache_limit:
self._disas_cache.clear()
self._disas_cache[key] = insns
return insns
def _is_bl(self, off): def _is_bl(self, off):
"""Return BL target file offset, or -1 if not a BL.""" """Return BL target file offset, or -1 if not a BL."""
@@ -354,6 +383,11 @@ class KernelPatcherBase:
def find_string_refs(self, str_off, code_start=None, code_end=None): def find_string_refs(self, str_off, code_start=None, code_end=None):
"""Find all (adrp_off, add_off, dest_reg) referencing str_off via ADRP+ADD.""" """Find all (adrp_off, add_off, dest_reg) referencing str_off via ADRP+ADD."""
key = (str_off, code_start, code_end)
cached = self._string_refs_cache.get(key)
if cached is not None:
return cached
target_va = self._va(str_off) target_va = self._va(str_off)
target_page = target_va & ~0xFFF target_page = target_va & ~0xFFF
page_off = target_va & 0xFFF page_off = target_va & 0xFFF
@@ -375,6 +409,7 @@ class KernelPatcherBase:
if add_rn == rd and add_imm == page_off: if add_rn == rd and add_imm == page_off:
add_rd = nxt & 0x1F add_rd = nxt & 0x1F
refs.append((adrp_off, adrp_off + 4, add_rd)) refs.append((adrp_off, adrp_off + 4, add_rd))
self._string_refs_cache[key] = refs
return refs return refs
def find_function_start(self, off, max_back=0x4000): def find_function_start(self, off, max_back=0x4000):
@@ -384,19 +419,33 @@ class KernelPatcherBase:
bytes to look for PACIBSP (ARM64e functions may have several STP bytes to look for PACIBSP (ARM64e functions may have several STP
instructions in the prologue before STP x29,x30). instructions in the prologue before STP x29,x30).
""" """
use_cache = max_back == 0x4000
if use_cache:
cached = self._func_start_cache.get(off)
if cached is not None:
return cached
result = -1
for o in range(off - 4, max(off - max_back, 0), -4): for o in range(off - 4, max(off - max_back, 0), -4):
insn = _rd32(self.raw, o) insn = _rd32(self.raw, o)
if insn == _PACIBSP_U32: if insn == _PACIBSP_U32:
return o result = o
break
dis = self._disas_at(o) dis = self._disas_at(o)
if dis and dis[0].mnemonic == "stp" and "x29, x30, [sp" in dis[0].op_str: if dis and dis[0].mnemonic == "stp" and "x29, x30, [sp" in dis[0].op_str:
# Check further back for PACIBSP (prologue may have # Check further back for PACIBSP (prologue may have
# multiple STP instructions before x29,x30) # multiple STP instructions before x29,x30)
for k in range(o - 4, max(o - 0x24, 0), -4): for k in range(o - 4, max(o - 0x24, 0), -4):
if _rd32(self.raw, k) == _PACIBSP_U32: if _rd32(self.raw, k) == _PACIBSP_U32:
return k result = k
return o break
return -1 if result < 0:
result = o
break
if use_cache:
self._func_start_cache[off] = result
return result
def _disas_n(self, buf, off, count): def _disas_n(self, buf, off, count):
"""Disassemble *count* instructions from *buf* at file offset *off*.""" """Disassemble *count* instructions from *buf* at file offset *off*."""
@@ -454,10 +503,24 @@ class KernelPatcherBase:
Writing through to self.data ensures _find_code_cave() sees Writing through to self.data ensures _find_code_cave() sees
previously allocated shellcode and won't reuse the same cave. previously allocated shellcode and won't reuse the same cave.
""" """
self.patches.append((off, patch_bytes, desc)) patch_bytes = bytes(patch_bytes)
self.data[off : off + len(patch_bytes)] = patch_bytes with self._emit_lock:
self._patch_num += 1 existing = self._patch_by_off.get(off)
print(f" [{self._patch_num:2d}] 0x{off:08X} {desc}") if existing is not None:
existing_bytes, existing_desc = existing
if existing_bytes != patch_bytes:
raise RuntimeError(
f"Conflicting patch at 0x{off:08X}: "
f"{existing_desc!r} vs {desc!r}"
)
return
self._patch_by_off[off] = (patch_bytes, desc)
self.patches.append((off, patch_bytes, desc))
self.data[off : off + len(patch_bytes)] = patch_bytes
self._patch_num += 1
patch_num = self._patch_num
print(f" [{patch_num:2d}] 0x{off:08X} {desc}")
if self.verbose: if self.verbose:
self._print_patch_context(off, patch_bytes, desc) self._print_patch_context(off, patch_bytes, desc)
@@ -614,4 +677,3 @@ class KernelPatcherBase:
if val == 0: if val == 0:
return 0 return 0
return self._decode_chained_ptr(val) return self._decode_chained_ptr(val)

View File

@@ -1,5 +1,7 @@
"""kernel_jb.py — Jailbreak extension patcher for iOS kernelcache.""" """kernel_jb.py — Jailbreak extension patcher for iOS kernelcache."""
import time
from .kernel_jb_base import KernelJBPatcherBase from .kernel_jb_base import KernelJBPatcherBase
from .kernel_jb_patch_amfi_trustcache import KernelJBPatchAmfiTrustcacheMixin from .kernel_jb_patch_amfi_trustcache import KernelJBPatchAmfiTrustcacheMixin
from .kernel_jb_patch_amfi_execve import KernelJBPatchAmfiExecveMixin from .kernel_jb_patch_amfi_execve import KernelJBPatchAmfiExecveMixin
@@ -54,38 +56,80 @@ class KernelJBPatcher(
KernelJBPatchAmfiTrustcacheMixin, KernelJBPatchAmfiTrustcacheMixin,
KernelJBPatcherBase, KernelJBPatcherBase,
): ):
_TIMING_LOG_MIN_SECONDS = 10.0
_GROUP_AB_METHODS = (
"patch_amfi_cdhash_in_trustcache", # A1
"patch_amfi_execve_kill_path", # A2
"patch_task_conversion_eval_internal", # A3
"patch_sandbox_hooks_extended", # A4
"patch_post_validation_additional", # B5
"patch_proc_security_policy", # B6
"patch_proc_pidinfo", # B7
"patch_convert_port_to_map", # B8
"patch_vm_fault_enter_prepare", # B9
"patch_vm_map_protect", # B10
"patch_mac_mount", # B11
"patch_dounmount", # B12
"patch_bsd_init_auth", # B13
"patch_spawn_validate_persona", # B14
"patch_task_for_pid", # B15
"patch_load_dylinker", # B16
"patch_shared_region_map", # B17
"patch_nvram_verify_permission", # B18
"patch_io_secure_bsd_root", # B19
"patch_thid_should_crash", # B20
)
_GROUP_C_METHODS = (
"patch_cred_label_update_execve", # C21
"patch_syscallmask_apply_to_proc", # C22
"patch_hook_cred_label_update_execve", # C23
"patch_kcall10", # C24
)
def __init__(self, data, verbose=False):
super().__init__(data, verbose)
self.patch_timings = []
def _run_patch_method_timed(self, method_name):
before = len(self.patches)
t0 = time.perf_counter()
getattr(self, method_name)()
dt = time.perf_counter() - t0
added = len(self.patches) - before
self.patch_timings.append((method_name, dt, added))
if dt >= self._TIMING_LOG_MIN_SECONDS:
print(f" [T] {method_name:36s} {dt:7.3f}s (+{added})")
def _run_methods(self, methods):
for method_name in methods:
self._run_patch_method_timed(method_name)
def _print_timing_summary(self):
if not self.patch_timings:
return
slow_items = [
item
for item in sorted(self.patch_timings, key=lambda item: item[1], reverse=True)
if item[1] >= self._TIMING_LOG_MIN_SECONDS
]
if not slow_items:
return
print(
"\n [Timing Summary] JB patch method cost (desc, >= "
f"{self._TIMING_LOG_MIN_SECONDS:.0f}s):"
)
for method_name, dt, added in slow_items:
print(f" {dt:7.3f}s (+{added:3d}) {method_name}")
def find_all(self): def find_all(self):
self.patches = [] self._reset_patch_state()
self.patch_timings = []
# Group A: Existing patches self._run_methods(self._GROUP_AB_METHODS)
self.patch_amfi_cdhash_in_trustcache() # A1 self._run_methods(self._GROUP_C_METHODS)
self.patch_amfi_execve_kill_path() # A2 self._print_timing_summary()
self.patch_task_conversion_eval_internal() # A3
self.patch_sandbox_hooks_extended() # A4
# Group B: Simple patches (string-anchored / pattern-matched)
self.patch_post_validation_additional() # B5
self.patch_proc_security_policy() # B6 (fixed: was patching copyio)
self.patch_proc_pidinfo() # B7
self.patch_convert_port_to_map() # B8 (fixed: was patching PAC check)
self.patch_vm_fault_enter_prepare() # B9
self.patch_vm_map_protect() # B10
self.patch_mac_mount() # B11
self.patch_dounmount() # B12
self.patch_bsd_init_auth() # B13
self.patch_spawn_validate_persona() # B14
self.patch_task_for_pid() # B15
self.patch_load_dylinker() # B16
self.patch_shared_region_map() # B17
self.patch_nvram_verify_permission() # B18
self.patch_io_secure_bsd_root() # B19
self.patch_thid_should_crash() # B20
# Group C: Complex shellcode patches
self.patch_cred_label_update_execve() # C21
self.patch_syscallmask_apply_to_proc() # C22
self.patch_hook_cred_label_update_execve() # C23
self.patch_kcall10() # C24
return self.patches return self.patches

View File

@@ -3,6 +3,7 @@
import struct import struct
from collections import Counter from collections import Counter
from .kernel_asm import _PACIBSP_U32
from capstone.arm64_const import ( from capstone.arm64_const import (
ARM64_OP_REG, ARM64_OP_REG,
ARM64_OP_IMM, ARM64_OP_IMM,
@@ -38,6 +39,9 @@ MOV_X8_XZR = asm("mov x8, xzr")
class KernelJBPatcherBase(KernelPatcher): class KernelJBPatcherBase(KernelPatcher):
def __init__(self, data, verbose=False): def __init__(self, data, verbose=False):
super().__init__(data, verbose) super().__init__(data, verbose)
self._jb_scan_cache = {}
self._proc_info_anchor_scanned = False
self._proc_info_anchor = (-1, -1)
self._build_symbol_table() self._build_symbol_table()
# ── Symbol table (best-effort, may find 0 on stripped kernels) ── # ── Symbol table (best-effort, may find 0 on stripped kernels) ──
@@ -122,6 +126,79 @@ class KernelJBPatcherBase(KernelPatcher):
"""Look up a function symbol, return file offset or -1.""" """Look up a function symbol, return file offset or -1."""
return self.symbols.get(name, -1) return self.symbols.get(name, -1)
# ── Shared kernel anchor finders ──────────────────────────────
def _find_proc_info_anchor(self):
"""Find `_proc_info` switch anchor as (func_start, switch_off).
Shared by B6/B7 patches. Cached because searching this anchor in
`kern_text` is expensive on stripped kernels.
"""
if self._proc_info_anchor_scanned:
return self._proc_info_anchor
def _scan_range(start, end):
"""Fast raw matcher for:
sub wN, wM, #1
cmp wN, #0x21
"""
key = ("proc_info_switch", start, end)
cached = self._jb_scan_cache.get(key)
if cached is not None:
return cached
scan_start = max(start, 0)
limit = min(end - 8, self.size - 8)
for off in range(scan_start, limit, 4):
i0 = _rd32(self.raw, off)
# SUB (immediate), 32-bit
if (i0 & 0xFF000000) != 0x51000000:
continue
if ((i0 >> 22) & 1) != 0: # sh must be 0
continue
if ((i0 >> 10) & 0xFFF) != 1:
continue
sub_rd = i0 & 0x1F
i1 = _rd32(self.raw, off + 4)
# CMP wN,#imm == SUBS wzr,wN,#imm alias (rd must be wzr)
if (i1 & 0xFF00001F) != 0x7100001F:
continue
if ((i1 >> 22) & 1) != 0: # sh must be 0
continue
if ((i1 >> 10) & 0xFFF) != 0x21:
continue
cmp_rn = (i1 >> 5) & 0x1F
if sub_rd != cmp_rn:
continue
self._jb_scan_cache[key] = off
return off
self._jb_scan_cache[key] = -1
return -1
# Prefer direct symbol when present.
proc_info_func = self._resolve_symbol("_proc_info")
if proc_info_func >= 0:
search_end = min(proc_info_func + 0x800, self.size)
switch_off = _scan_range(proc_info_func, search_end)
if switch_off < 0:
switch_off = proc_info_func
self._proc_info_anchor = (proc_info_func, switch_off)
self._proc_info_anchor_scanned = True
return self._proc_info_anchor
ks, ke = self.kern_text
switch_off = _scan_range(ks, ke)
if switch_off >= 0:
proc_info_func = self.find_function_start(switch_off)
self._proc_info_anchor = (proc_info_func, switch_off)
else:
self._proc_info_anchor = (-1, -1)
self._proc_info_anchor_scanned = True
return self._proc_info_anchor
# ── Code cave finder ────────────────────────────────────────── # ── Code cave finder ──────────────────────────────────────────
def _find_code_cave(self, size, align=4): def _find_code_cave(self, size, align=4):
@@ -182,8 +259,7 @@ class KernelJBPatcherBase(KernelPatcher):
"""Find the end of a function (next PACIBSP or limit).""" """Find the end of a function (next PACIBSP or limit)."""
limit = min(func_start + max_size, self.size) limit = min(func_start + max_size, self.size)
for off in range(func_start + 4, limit, 4): for off in range(func_start + 4, limit, 4):
d = self._disas_at(off) if _rd32(self.raw, off) == _PACIBSP_U32:
if d and d[0].mnemonic == "pacibsp":
return off return off
return limit return limit

View File

@@ -1,9 +1,16 @@
"""Mixin: KernelJBPatchBsdInitAuthMixin.""" """Mixin: KernelJBPatchBsdInitAuthMixin."""
from .kernel_jb_base import MOV_X0_0 from .kernel_jb_base import MOV_X0_0, _rd32
class KernelJBPatchBsdInitAuthMixin: class KernelJBPatchBsdInitAuthMixin:
# ldr x0, [xN, #0x2b8] (ignore xN/Rn)
_LDR_X0_2B8_MASK = 0xFFFFFC1F
_LDR_X0_2B8_VAL = 0xF9415C00
# cbz {w0|x0}, <label> (mask drops sf bit)
_CBZ_X0_MASK = 0x7F00001F
_CBZ_X0_VAL = 0x34000000
def patch_bsd_init_auth(self): def patch_bsd_init_auth(self):
"""Bypass rootvp authentication check in _bsd_init. """Bypass rootvp authentication check in _bsd_init.
Pattern: ldr x0, [xN, #0x2b8]; cbz x0, ...; bl AUTH_FUNC Pattern: ldr x0, [xN, #0x2b8]; cbz x0, ...; bl AUTH_FUNC
@@ -22,21 +29,7 @@ class KernelJBPatchBsdInitAuthMixin:
# Pattern search: ldr x0, [xN, #0x2b8]; cbz x0; bl # Pattern search: ldr x0, [xN, #0x2b8]; cbz x0; bl
ks, ke = self.kern_text ks, ke = self.kern_text
candidates = [] candidates = self._collect_auth_bl_candidates(ks, ke)
for off in range(ks, ke - 8, 4):
d = self._disas_at(off, 3)
if len(d) < 3:
continue
i0, i1, i2 = d[0], d[1], d[2]
if i0.mnemonic != "ldr" or i1.mnemonic != "cbz" or i2.mnemonic != "bl":
continue
if not i0.op_str.startswith("x0,"):
continue
if "#0x2b8" not in i0.op_str:
continue
if not i1.op_str.startswith("x0,"):
continue
candidates.append(off + 8) # the BL offset
if not candidates: if not candidates:
self._log(" [-] ldr+cbz+bl pattern not found") self._log(" [-] ldr+cbz+bl pattern not found")
@@ -57,6 +50,11 @@ class KernelJBPatchBsdInitAuthMixin:
def _find_auth_bl(self, start, end): def _find_auth_bl(self, start, end):
"""Find ldr x0,[xN,#0x2b8]; cbz x0; bl pattern. Returns BL offset.""" """Find ldr x0,[xN,#0x2b8]; cbz x0; bl pattern. Returns BL offset."""
cands = self._collect_auth_bl_candidates(start, end)
if cands:
return cands[0]
# Fallback for unexpected instruction variants.
for off in range(start, end - 8, 4): for off in range(start, end - 8, 4):
d = self._disas_at(off, 3) d = self._disas_at(off, 3)
if len(d) < 3: if len(d) < 3:
@@ -67,3 +65,23 @@ class KernelJBPatchBsdInitAuthMixin:
if i1.op_str.startswith("x0,"): if i1.op_str.startswith("x0,"):
return off + 8 return off + 8
return None return None
def _collect_auth_bl_candidates(self, start, end):
"""Fast matcher using raw instruction masks (no capstone in hot loop)."""
out = []
limit = min(end - 8, self.size - 8)
for off in range(max(start, 0), limit, 4):
i0 = _rd32(self.raw, off)
if (i0 & self._LDR_X0_2B8_MASK) != self._LDR_X0_2B8_VAL:
continue
i1 = _rd32(self.raw, off + 4)
if (i1 & self._CBZ_X0_MASK) != self._CBZ_X0_VAL:
continue
i2 = _rd32(self.raw, off + 8)
if (i2 & 0xFC000000) != 0x94000000: # BL imm26
continue
out.append(off + 8)
return out

View File

@@ -1,6 +1,6 @@
"""Mixin: KernelJBPatchProcPidinfoMixin.""" """Mixin: KernelJBPatchProcPidinfoMixin."""
from .kernel_jb_base import ARM64_OP_IMM, NOP from .kernel_jb_base import NOP
class KernelJBPatchProcPidinfoMixin: class KernelJBPatchProcPidinfoMixin:
@@ -30,28 +30,8 @@ class KernelJBPatchProcPidinfoMixin:
self.emit(hits[1], NOP, "NOP [_proc_pidinfo pid-0 guard B]") self.emit(hits[1], NOP, "NOP [_proc_pidinfo pid-0 guard B]")
return True return True
# Find _proc_info by switch table pattern (same as proc_security_policy) # Reuse proc_info anchor from proc_security path (cached).
proc_info_func = -1 proc_info_func, _ = self._find_proc_info_anchor()
ks, ke = self.kern_text
for off in range(ks, ke - 8, 4):
d = self._disas_at(off, 2)
if len(d) < 2:
continue
i0, i1 = d[0], d[1]
if i0.mnemonic != "sub" or i1.mnemonic != "cmp":
continue
if len(i0.operands) < 3:
continue
if i0.operands[2].type != ARM64_OP_IMM or i0.operands[2].imm != 1:
continue
if len(i1.operands) < 2:
continue
if i1.operands[1].type != ARM64_OP_IMM or i1.operands[1].imm != 0x21:
continue
if i0.operands[0].reg != i1.operands[0].reg:
continue
proc_info_func = self.find_function_start(off)
break
if proc_info_func < 0: if proc_info_func < 0:
self._log(" [-] _proc_info function not found") self._log(" [-] _proc_info function not found")

View File

@@ -1,6 +1,6 @@
"""Mixin: KernelJBPatchProcSecurityMixin.""" """Mixin: KernelJBPatchProcSecurityMixin."""
from .kernel_jb_base import ARM64_OP_IMM, MOV_X0_0, RET, Counter, _rd32, struct from .kernel_jb_base import MOV_X0_0, RET, Counter, _rd32
class KernelJBPatchProcSecurityMixin: class KernelJBPatchProcSecurityMixin:
@@ -22,31 +22,10 @@ class KernelJBPatchProcSecurityMixin:
self.emit(foff + 4, RET, "ret [_proc_security_policy]") self.emit(foff + 4, RET, "ret [_proc_security_policy]")
return True return True
# Find _proc_info by its distinctive switch table # Find _proc_info by switch pattern:
# Pattern: sub wN, wM, #1; cmp wN, #0x21 (33 = max proc_info callnum) # sub wN,wM,#1 ; cmp wN,#0x21
proc_info_func = -1 proc_info_func, switch_off = self._find_proc_info_anchor()
switch_off = -1
ks, ke = self.kern_text ks, ke = self.kern_text
for off in range(ks, ke - 8, 4):
d = self._disas_at(off, 2)
if len(d) < 2:
continue
i0, i1 = d[0], d[1]
if i0.mnemonic != "sub" or i1.mnemonic != "cmp":
continue
if len(i0.operands) < 3:
continue
if i0.operands[2].type != ARM64_OP_IMM or i0.operands[2].imm != 1:
continue
if len(i1.operands) < 2:
continue
if i1.operands[1].type != ARM64_OP_IMM or i1.operands[1].imm != 0x21:
continue
if i0.operands[0].reg != i1.operands[0].reg:
continue
proc_info_func = self.find_function_start(off)
switch_off = off
break
if proc_info_func < 0: if proc_info_func < 0:
self._log(" [-] _proc_info function not found") self._log(" [-] _proc_info function not found")
@@ -62,8 +41,14 @@ class KernelJBPatchProcSecurityMixin:
# since security policy is called from switch cases not the prologue) # since security policy is called from switch cases not the prologue)
bl_targets = Counter() bl_targets = Counter()
for off in range(switch_off, proc_info_end, 4): for off in range(switch_off, proc_info_end, 4):
target = self._is_bl(off) insn = _rd32(self.raw, off)
if target >= 0 and ks <= target < ke: if (insn & 0xFC000000) != 0x94000000:
continue
imm26 = insn & 0x3FFFFFF
if imm26 & (1 << 25):
imm26 -= 1 << 26
target = off + imm26 * 4
if ks <= target < ke:
bl_targets[target] += 1 bl_targets[target] += 1
if not bl_targets: if not bl_targets:

View File

@@ -1,16 +1,125 @@
"""Mixin: KernelJBPatchTaskConversionMixin.""" """Mixin: KernelJBPatchTaskConversionMixin."""
from .kernel_jb_base import ARM64_OP_REG, ARM64_OP_MEM, ARM64_REG_X0, ARM64_REG_X1, CMP_XZR_XZR from .kernel_jb_base import (
ARM64_OP_REG,
ARM64_OP_MEM,
ARM64_REG_X0,
ARM64_REG_X1,
CMP_XZR_XZR,
asm,
struct,
_rd32,
)
def _u32(insn):
return struct.unpack("<I", asm(insn))[0]
def _derive_mask_and_value(insns):
vals = [_u32(i) for i in insns]
mask = 0xFFFFFFFF
for v in vals[1:]:
mask &= ~(vals[0] ^ v)
value = vals[0] & mask
return mask, value
def _field_mask(total_bits=32, variable_fields=()):
mask = (1 << total_bits) - 1
for start, width in variable_fields:
mask &= ~(((1 << width) - 1) << start)
return mask & ((1 << total_bits) - 1)
class KernelJBPatchTaskConversionMixin: class KernelJBPatchTaskConversionMixin:
# Build all matcher constants from keystone-assembled instruction bytes.
# No hardcoded opcode constants.
_CMP_XN_X0_MASK, _CMP_XN_X0_VAL = _derive_mask_and_value(
("cmp x0, x0", "cmp x1, x0", "cmp x30, x0")
)
_CMP_XN_X1_MASK, _CMP_XN_X1_VAL = _derive_mask_and_value(
("cmp x0, x1", "cmp x1, x1", "cmp x30, x1")
)
_BEQ_MASK = _field_mask(variable_fields=((5, 19),))
_BEQ_VAL = _u32("b.eq #0x100") & _BEQ_MASK
_LDR_X_UNSIGNED_MASK = _field_mask(variable_fields=((0, 5), (5, 5), (10, 12)))
_LDR_X_UNSIGNED_VAL = _u32("ldr x0, [x0]") & _LDR_X_UNSIGNED_MASK
def patch_task_conversion_eval_internal(self): def patch_task_conversion_eval_internal(self):
"""Allow task conversion: cmp Xn,x0 -> cmp xzr,xzr at unique guard site.""" """Allow task conversion: cmp Xn,x0 -> cmp xzr,xzr at unique guard site."""
self._log("\n[JB] task_conversion_eval_internal: cmp xzr,xzr") self._log("\n[JB] task_conversion_eval_internal: cmp xzr,xzr")
candidates = []
ks, ke = self.kern_text ks, ke = self.kern_text
for off in range(ks + 4, ke - 12, 4): candidates = self._collect_candidates_fast(ks, ke)
# Keep the previous capstone matcher as a fallback for kernel variants
# with instruction forms that don't satisfy the fast-path masks.
if len(candidates) != 1:
candidates = self._collect_candidates_slow(ks, ke)
if len(candidates) != 1:
self._log(
f" [-] expected 1 task-conversion guard site, found {len(candidates)}"
)
return False
self.emit(
candidates[0], CMP_XZR_XZR, "cmp xzr,xzr [_task_conversion_eval_internal]"
)
return True
def _collect_candidates_fast(self, start, end):
cache = getattr(self, "_jb_scan_cache", None)
key = ("task_conversion_fast", start, end)
if cache is not None:
cached = cache.get(key)
if cached is not None:
return cached
out = []
for off in range(start + 4, end - 12, 4):
i0 = _rd32(self.raw, off)
if (i0 & self._CMP_XN_X0_MASK) != self._CMP_XN_X0_VAL:
continue
cmp_reg = (i0 >> 5) & 0x1F
p = _rd32(self.raw, off - 4)
if (p & self._LDR_X_UNSIGNED_MASK) != self._LDR_X_UNSIGNED_VAL:
continue
p_rt = p & 0x1F
p_rn = (p >> 5) & 0x1F
if p_rt != cmp_reg or p_rn != cmp_reg:
continue
i1 = _rd32(self.raw, off + 4)
if (i1 & self._BEQ_MASK) != self._BEQ_VAL:
continue
i2 = _rd32(self.raw, off + 8)
if (i2 & self._CMP_XN_X1_MASK) != self._CMP_XN_X1_VAL:
continue
if ((i2 >> 5) & 0x1F) != cmp_reg:
continue
i3 = _rd32(self.raw, off + 12)
if (i3 & self._BEQ_MASK) != self._BEQ_VAL:
continue
out.append(off)
if cache is not None:
cache[key] = out
return out
def _collect_candidates_slow(self, start, end):
cache = getattr(self, "_jb_scan_cache", None)
key = ("task_conversion_slow", start, end)
if cache is not None:
cached = cache.get(key)
if cached is not None:
return cached
out = []
for off in range(start + 4, end - 12, 4):
d0 = self._disas_at(off) d0 = self._disas_at(off)
if not d0: if not d0:
continue continue
@@ -55,15 +164,7 @@ class KernelJBPatchTaskConversionMixin:
if i3.mnemonic != "b.eq": if i3.mnemonic != "b.eq":
continue continue
candidates.append(off) out.append(off)
if cache is not None:
if len(candidates) != 1: cache[key] = out
self._log( return out
f" [-] expected 1 task-conversion guard site, found {len(candidates)}"
)
return False
self.emit(
candidates[0], CMP_XZR_XZR, "cmp xzr,xzr [_task_conversion_eval_internal]"
)
return True

View File

@@ -1,8 +1,14 @@
"""Mixin: APFS mount checks patches.""" """Mixin: APFS mount checks patches."""
from capstone.arm64_const import ARM64_OP_REG, ARM64_REG_W0, ARM64_REG_X0 from capstone.arm64_const import (
ARM64_OP_IMM,
ARM64_OP_REG,
ARM64_REG_W0,
ARM64_REG_W8,
ARM64_REG_X0,
)
from .kernel_asm import CMP_X0_X0, MOV_W0_0, _PACIBSP_U32, _rd32 from .kernel_asm import CMP_X0_X0, MOV_W0_0, NOP, _PACIBSP_U32, _rd32
class KernelPatchApfsMountMixin: class KernelPatchApfsMountMixin:
@@ -141,3 +147,102 @@ class KernelPatchApfsMountMixin:
self._log(" [-] BL + TBNZ w0 pattern not found") self._log(" [-] BL + TBNZ w0 pattern not found")
return False return False
def patch_apfs_get_dev_by_role_entitlement(self):
"""Patch 16: bypass APFS get-dev-by-role entitlement gate.
In handle_get_dev_by_role, APFS checks:
1) context predicate (BL ... ; CBZ X0, deny)
2) entitlement check for "com.apple.apfs.get-dev-by-role"
(BL ... ; CBZ W0, deny)
mount-phase-1 for /private/preboot and /private/xarts can fail here with:
"%s:%d: %s This operation needs entitlement" (line 13101)
We NOP the deny branches so the function continues into normal role lookup.
"""
self._log("\n[16] handle_get_dev_by_role: bypass entitlement gate")
str_off = self.find_string(b"com.apple.apfs.get-dev-by-role")
if str_off < 0:
self._log(" [-] entitlement string not found")
return False
refs = self.find_string_refs(str_off, *self.apfs_text)
if not refs:
self._log(" [-] no code refs to entitlement string")
return False
def _is_entitlement_error_block(target_off, func_end):
"""Heuristic: target block sets known entitlement-gate line IDs."""
scan_end = min(target_off + 0x30, func_end)
for off in range(target_off, scan_end, 4):
ins = self._disas_at(off)
if not ins:
continue
i = ins[0]
# Keep scan local to the direct target block.
# Crossing a call/unconditional jump usually means a different path.
if i.mnemonic in ("bl", "b", "ret", "retab"):
break
if i.mnemonic != "mov" or len(i.operands) < 2:
continue
if (
i.operands[0].type == ARM64_OP_REG
and i.operands[0].reg == ARM64_REG_W8
and i.operands[1].type == ARM64_OP_IMM
and i.operands[1].imm in (0x332D, 0x333B)
):
return True
return False
for ref in refs:
ref_off = ref[0]
func_start = self.find_function_start(ref_off)
if func_start < 0:
continue
func_end = min(func_start + 0x1200, self.size)
# Hardened logic:
# patch all CBZ/CBNZ on X0/W0 that jump into entitlement
# error blocks (line 0x33xx logger paths).
candidates = []
for off in range(func_start, func_end, 4):
ins = self._disas_at(off)
if not ins:
continue
i = ins[0]
if i.mnemonic not in ("cbz", "cbnz") or len(i.operands) < 2:
continue
if (
i.operands[0].type != ARM64_OP_REG
or i.operands[1].type != ARM64_OP_IMM
):
continue
if i.operands[0].reg not in (ARM64_REG_W0, ARM64_REG_X0):
continue
target = i.operands[1].imm
if not (func_start <= target < func_end):
continue
if target <= off:
continue
if not _is_entitlement_error_block(target, func_end):
continue
# Keep deterministic order; avoid duplicate offsets.
if all(prev_off != off for prev_off, _, _ in candidates):
candidates.append((off, i.operands[0].reg, target))
if candidates:
for off, reg, target in candidates:
gate = "context" if reg == ARM64_REG_X0 else "entitlement"
self.emit(
off,
NOP,
f"NOP [handle_get_dev_by_role {gate} check -> 0x{target:X}]",
)
return True
self._log(" [-] handle_get_dev_by_role entitlement gate pattern not found")
return False

View File

@@ -1,11 +1,63 @@
"""Mixin: debugger enablement patch.""" """Mixin: debugger enablement patch."""
from capstone.arm64_const import ARM64_OP_REG, ARM64_REG_X8
from .kernel_asm import MOV_X0_1, RET, _rd32, _rd64 from .kernel_asm import MOV_X0_1, RET, _rd32, _rd64
_GPR_X8_NUM = 8
class KernelPatchDebuggerMixin: class KernelPatchDebuggerMixin:
def _is_adrp_x8(self, insn):
"""Fast raw check: ADRP x8, <page>."""
return (insn & 0x9F000000) == 0x90000000 and (insn & 0x1F) == _GPR_X8_NUM
def _has_w_ldr_from_x8(self, func_off, max_insns=8):
"""Heuristic: first few instructions include ldr wN, [x8, ...]."""
for k in range(1, max_insns + 1):
off = func_off + k * 4
if off >= self.size:
break
dk = self._disas_at(off)
if (
dk
and dk[0].mnemonic == "ldr"
and dk[0].op_str.startswith("w")
and "x8" in dk[0].op_str
):
return True
return False
def _find_debugger_by_bl_histogram(self, kern_text_start, kern_text_end):
"""Find target from BL call histogram to avoid full __text scan."""
best_off = -1
best_callers = 0
for target_off, callers in self.bl_callers.items():
n_callers = len(callers)
# _PE_i_can_has_debugger is broadly used but far from panic-level fanout.
if n_callers < 50 or n_callers > 250:
continue
if target_off < kern_text_start or target_off >= kern_text_end:
continue
if target_off + 4 > self.size or (target_off & 3):
continue
first_insn = _rd32(self.raw, target_off)
if not self._is_adrp_x8(first_insn):
continue
if target_off >= 4 and not self._is_func_boundary(
_rd32(self.raw, target_off - 4)
):
continue
if not self._has_w_ldr_from_x8(target_off):
continue
if n_callers > best_callers:
best_callers = n_callers
best_off = target_off
return best_off, best_callers
def patch_PE_i_can_has_debugger(self): def patch_PE_i_can_has_debugger(self):
"""Patches 6-7: mov x0,#1; ret at _PE_i_can_has_debugger.""" """Patches 6-7: mov x0,#1; ret at _PE_i_can_has_debugger."""
self._log("\n[6-7] _PE_i_can_has_debugger: stub with mov x0,#1; ret") self._log("\n[6-7] _PE_i_can_has_debugger: stub with mov x0,#1; ret")
@@ -38,50 +90,15 @@ class KernelPatchDebuggerMixin:
) )
return True return True
# Strategy 2: code pattern — function starts with ADRP x8, # Strategy 2: pick candidates from BL histogram + lightweight signature checks.
# preceded by a function boundary, has many BL callers,
# and reads a 32-bit (w-register) value within first few instructions.
self._log(" [*] trying code pattern search...") self._log(" [*] trying code pattern search...")
# Determine kernel-only __text range from fileset entries if available # Determine kernel-only __text range from fileset entries if available
kern_text_start, kern_text_end = self._get_kernel_text_range() kern_text_start, kern_text_end = self._get_kernel_text_range()
best_off = -1 best_off, best_callers = self._find_debugger_by_bl_histogram(
best_callers = 0 kern_text_start, kern_text_end
for off in range(kern_text_start, kern_text_end - 12, 4): )
dis = self._disas_at(off)
if not dis or dis[0].mnemonic != "adrp":
continue
# Must target x8
if dis[0].operands[0].reg != ARM64_REG_X8:
continue
# Must be preceded by function boundary
if off >= 4:
prev = _rd32(self.raw, off - 4)
if not self._is_func_boundary(prev):
continue
# Must read a w-register (32-bit) from [x8, #imm] within first 6 instructions
has_w_load = False
for k in range(1, 7):
if off + k * 4 >= self.size:
break
dk = self._disas_at(off + k * 4)
if (
dk
and dk[0].mnemonic == "ldr"
and dk[0].op_str.startswith("w")
and "x8" in dk[0].op_str
):
has_w_load = True
break
if not has_w_load:
continue
# Count callers — _PE_i_can_has_debugger has ~80-200 callers
# (widely used but not a basic kernel primitive)
n_callers = len(self.bl_callers.get(off, []))
if 50 <= n_callers <= 250 and n_callers > best_callers:
best_callers = n_callers
best_off = off
if best_off >= 0: if best_off >= 0:
self._log( self._log(
@@ -91,5 +108,32 @@ class KernelPatchDebuggerMixin:
self.emit(best_off + 4, RET, "ret [_PE_i_can_has_debugger]") self.emit(best_off + 4, RET, "ret [_PE_i_can_has_debugger]")
return True return True
# Strategy 3 (fallback): full-range scan with raw opcode pre-filtering.
# Keeps cross-variant resilience while avoiding capstone on every address.
self._log(" [*] trying full scan fallback...")
best_off = -1
best_callers = 0
for off in range(kern_text_start, kern_text_end - 12, 4):
first_insn = _rd32(self.raw, off)
if not self._is_adrp_x8(first_insn):
continue
if off >= 4 and not self._is_func_boundary(_rd32(self.raw, off - 4)):
continue
if not self._has_w_ldr_from_x8(off):
continue
n_callers = len(self.bl_callers.get(off, []))
if 50 <= n_callers <= 250 and n_callers > best_callers:
best_callers = n_callers
best_off = off
if best_off >= 0:
self._log(
f" [+] fallback match at 0x{best_off:X} ({best_callers} callers)"
)
self.emit(best_off, MOV_X0_1, "mov x0,#1 [_PE_i_can_has_debugger]")
self.emit(best_off + 4, RET, "ret [_PE_i_can_has_debugger]")
return True
self._log(" [-] function not found") self._log(" [-] function not found")
return False return False

View File

@@ -42,6 +42,7 @@ from fw_patch import (
find_file, find_file,
) )
from patchers.iboot import IBootPatcher from patchers.iboot import IBootPatcher
from patchers.kernel import KernelPatcher
# ══════════════════════════════════════════════════════════════════ # ══════════════════════════════════════════════════════════════════
# Configuration # Configuration
@@ -212,6 +213,59 @@ def build_kernel_img4(kernel_src, output_dir, temp_dir, im4m_path, output_name,
print(f" [+] {output_name}") print(f" [+] {output_name}")
def _find_pristine_cloudos_kernel():
"""Find a pristine CloudOS vphone600 research kernel from project ipsws/."""
env_path = os.environ.get("RAMDISK_BASE_KERNEL", "").strip()
if env_path:
p = os.path.abspath(env_path)
if os.path.isfile(p):
return p
print(f" [!] RAMDISK_BASE_KERNEL set but not found: {p}")
project_root = os.path.abspath(os.path.join(_SCRIPT_DIR, ".."))
patterns = [
os.path.join(project_root, "ipsws", "PCC-CloudOS*", "kernelcache.research.vphone600"),
os.path.join(project_root, "ipsws", "*CloudOS*", "kernelcache.research.vphone600"),
]
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if matches:
return matches[0]
return None
def derive_ramdisk_kernel_source(kc_src, temp_dir):
"""Get source kernel for krnl.ramdisk.img4 entirely within ramdisk_build flow.
Priority:
1) Existing legacy snapshot next to restore kernel (`*.ramdisk`)
2) Derive from pristine CloudOS kernel by applying base KernelPatcher
"""
legacy_snapshot = f"{kc_src}{RAMDISK_KERNEL_SUFFIX}"
if os.path.isfile(legacy_snapshot):
print(f" found legacy ramdisk kernel snapshot: {legacy_snapshot}")
return legacy_snapshot
pristine = _find_pristine_cloudos_kernel()
if not pristine:
print(" [!] pristine CloudOS kernel not found; skipping ramdisk-specific kernel image")
return None
print(f" deriving ramdisk kernel from pristine source: {pristine}")
im4p_obj, data, was_im4p, original_raw = load_firmware(pristine)
kp = KernelPatcher(data)
n = kp.apply()
print(f" [+] {n} base kernel patches applied for ramdisk variant")
out_path = os.path.join(temp_dir, f"kernelcache.research.vphone600{RAMDISK_KERNEL_SUFFIX}")
if was_im4p and im4p_obj is not None:
_save_im4p_with_payp(out_path, im4p_obj.fourcc, data, original_raw)
else:
with open(out_path, "wb") as f:
f.write(data)
return out_path
# ══════════════════════════════════════════════════════════════════ # ══════════════════════════════════════════════════════════════════
# iBEC boot-args patching # iBEC boot-args patching
# ══════════════════════════════════════════════════════════════════ # ══════════════════════════════════════════════════════════════════
@@ -587,10 +641,9 @@ def main():
], ],
"kernelcache", "kernelcache",
) )
kc_ramdisk_src = f"{kc_src}{RAMDISK_KERNEL_SUFFIX}" kc_ramdisk_src = derive_ramdisk_kernel_source(kc_src, temp_dir)
if os.path.isfile(kc_ramdisk_src): if kc_ramdisk_src:
print(f" found ramdisk kernel snapshot: {kc_ramdisk_src}") print(f" building {RAMDISK_KERNEL_IMG4} from ramdisk kernel source")
print(f" building {RAMDISK_KERNEL_IMG4} from base/dev snapshot")
build_kernel_img4( build_kernel_img4(
kc_ramdisk_src, kc_ramdisk_src,
output_dir, output_dir,
@@ -599,24 +652,16 @@ def main():
RAMDISK_KERNEL_IMG4, RAMDISK_KERNEL_IMG4,
"kcache_ramdisk", "kcache_ramdisk",
) )
print(" building krnl.img4 from restore kernel (post-JB)") print(" building krnl.img4 from restore kernel")
build_kernel_img4(
kc_src, build_kernel_img4(
output_dir, kc_src,
temp_dir, output_dir,
im4m_path, temp_dir,
"krnl.img4", im4m_path,
"kcache_jb", "krnl.img4",
) "kcache",
else: )
build_kernel_img4(
kc_src,
output_dir,
temp_dir,
im4m_path,
"krnl.img4",
"kcache",
)
# ── 8. Ramdisk + Trustcache ────────────────────────────────── # ── 8. Ramdisk + Trustcache ──────────────────────────────────
print(f"\n{'=' * 60}") print(f"\n{'=' * 60}")

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env zsh
set -euo pipefail

# Persist a reusable kernel checkpoint that later patch-testing runs restore.
#
# BASE_PATCH selects which patch pipeline runs before the checkpoint is taken:
#   normal -> fw_patch
#   dev    -> fw_patch_dev
#   jb     -> fw_patch_jb
PROJECT_DIR="$(cd "$(dirname "${0:a:h}")" && pwd)"
cd "$PROJECT_DIR"

VM_DIR="${VM_DIR:-vm}"
BASE_PATCH="${BASE_PATCH:-jb}"

# Map the requested variant onto its make target; reject anything else.
if [[ "$BASE_PATCH" == "normal" ]]; then
    patch_target="fw_patch"
elif [[ "$BASE_PATCH" == "dev" ]]; then
    patch_target="fw_patch_dev"
elif [[ "$BASE_PATCH" == "jb" ]]; then
    patch_target="fw_patch_jb"
else
    echo "[-] Invalid BASE_PATCH: $BASE_PATCH"
    echo " Use BASE_PATCH=normal|dev|jb"
    exit 1
fi

echo "[checkpoint] base_patch=$BASE_PATCH"

echo "[checkpoint] killing existing vphone-cli..."
pkill -9 vphone-cli 2>/dev/null || true
sleep 1

echo "[checkpoint] fw_prepare..."
make fw_prepare VM_DIR="$VM_DIR"

echo "[checkpoint] $patch_target..."
make "$patch_target" VM_DIR="$VM_DIR"

# Locate the freshly patched kernelcache inside the restore directory.
restore_dir="$(find "$VM_DIR" -maxdepth 1 -type d -name '*Restore*' -print -quit)"
if [[ -z "$restore_dir" ]]; then
    echo "[-] No *Restore* directory found in $VM_DIR"
    exit 1
fi

kernel_path="$(find "$restore_dir" -name 'kernelcache.research.vphone600' -print -quit)"
if [[ -z "$kernel_path" ]]; then
    echo "[-] kernelcache not found in $restore_dir"
    exit 1
fi

# For the JB pipeline, prefer the pre-JB base snapshot when one exists.
src_kernel="$kernel_path"
if [[ "$BASE_PATCH" == "jb" ]]; then
    ramdisk_snapshot="${kernel_path}.ramdisk"
    if [[ -f "$ramdisk_snapshot" ]]; then
        src_kernel="$ramdisk_snapshot"
        echo "[checkpoint] using JB base snapshot: $(basename "$ramdisk_snapshot")"
    fi
fi

checkpoint_path="${kernel_path}.checkpoint.${BASE_PATCH}.backup"
cp "$src_kernel" "$checkpoint_path"
echo "[checkpoint] saved: $checkpoint_path ($(wc -c < "$checkpoint_path") bytes)"

37
scripts/testing_exec.sh Executable file
View File

@@ -0,0 +1,37 @@
#!/usr/bin/env zsh
set -euo pipefail

# Quick testing flow; equivalent to running by hand:
#   pkill -9 vphone-cli
#   make fw_prepare
#   make fw_patch_jb
#   make testing_ramdisk_build
#   make testing_ramdisk_send &
#   make boot_dfu
PROJECT_DIR="$(cd "$(dirname "${0:a:h}")" && pwd)"
cd "$PROJECT_DIR"

VM_DIR="${VM_DIR:-vm}"

# Announce a step, then run the matching make target.
run_step() {
    echo "[testing_exec] $1..."
    make "$1" VM_DIR="$VM_DIR"
}

echo "[testing_exec] killing existing vphone-cli..."
pkill -9 vphone-cli 2>/dev/null || true
sleep 1

run_step fw_prepare
run_step fw_patch_jb
run_step testing_ramdisk_build

# Send runs in the background so boot_dfu can bring the device up in parallel.
echo "[testing_exec] testing_ramdisk_send (background)..."
make testing_ramdisk_send VM_DIR="$VM_DIR" &
send_pid=$!

run_step boot_dfu
wait "$send_pid" 2>/dev/null || true

136
scripts/testing_kernel_patch.py Executable file
View File

@@ -0,0 +1,136 @@
#!/usr/bin/env python3
"""
testing_kernel_patch.py — Restore a saved kernel checkpoint and apply selected patches.
Usage:
python3 testing_kernel_patch.py <vm_dir> --base-patch jb patch_kcall10
python3 testing_kernel_patch.py <vm_dir> --base-patch normal patch_apfs_get_dev_by_role_entitlement
python3 testing_kernel_patch.py <vm_dir> patch_mac_mount patch_dounmount
Notes:
- `--base-patch` selects which checkpoint file to restore first:
kernelcache.research.vphone600.checkpoint.<base_patch>.backup
Fallback: legacy `.base_backup`.
- Patch names can come from either `KernelPatcher` (base) or `KernelJBPatcher` (JB).
"""
import argparse
import os
import shutil
import sys
from fw_patch import find_file, find_restore_dir, load_firmware, save_firmware
from patchers.kernel import KernelPatcher
from patchers.kernel_jb import KernelJBPatcher
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the checkpoint-restore-and-patch tool.

    Returns:
        argparse.Namespace with:
          vm_dir      -- VM directory containing the iPhone*_Restore tree.
          patch_names -- list of patch method names to apply, in order.
          base_patch  -- checkpoint variant to restore ("normal"|"dev"|"jb").

    The --base-patch default comes from the BASE_PATCH environment variable
    when set. argparse does NOT check `choices` against default values, so an
    env-supplied value must be validated explicitly here; an invalid one is
    rejected with the standard argparse error (exit status 2).
    """
    choices = ("normal", "dev", "jb")
    parser = argparse.ArgumentParser(description="Restore checkpoint and apply selected kernel patch methods")
    parser.add_argument("vm_dir", help="VM directory (contains iPhone*_Restore)")
    parser.add_argument(
        "patch_names",
        nargs="+",
        help="Patch method names to apply (e.g. patch_kcall10)",
    )
    parser.add_argument(
        "--base-patch",
        choices=choices,
        default=os.environ.get("BASE_PATCH") or "jb",
        help="Checkpoint variant to restore first (default: jb)",
    )
    args = parser.parse_args()
    if args.base_patch not in choices:
        # Only reachable when an invalid BASE_PATCH env var became the default.
        parser.error(
            f"argument --base-patch: invalid choice: {args.base_patch!r} "
            f"(choose from {', '.join(choices)})"
        )
    return args
def resolve_checkpoint(kernel_path: str, base_patch: str) -> str:
    """Return the path of the checkpoint file to restore.

    Looks for the variant-specific checkpoint first:
        <kernel_path>.checkpoint.<base_patch>.backup
    then falls back to the legacy single-variant name:
        <kernel_path>.base_backup

    Exits with status 1 (after printing guidance) when neither exists.
    """
    preferred = f"{kernel_path}.checkpoint.{base_patch}.backup"
    legacy = f"{kernel_path}.base_backup"
    if os.path.exists(preferred):
        return preferred
    if os.path.exists(legacy):
        print(f"[!] preferred checkpoint missing, using legacy backup: {legacy}")
        return legacy
    # No checkpoint anywhere: tell the user how to create one, then bail.
    # (Plain string — the original used an f-string with no placeholders.)
    print("[-] No checkpoint found.")
    print(f" Missing: {preferred}")
    print(f" Missing: {legacy}")
    print(f" Run 'make testing_checkpoint_save BASE_PATCH={base_patch}' first.")
    sys.exit(1)
def list_available_methods(base_patcher: KernelPatcher, jb_patcher: KernelJBPatcher) -> None:
    """Print every callable patch_* method exposed by either patcher."""
    discovered = {
        attr
        for patcher in (base_patcher, jb_patcher)
        for attr in dir(patcher)
        if attr.startswith("patch_") and callable(getattr(patcher, attr))
    }
    print(" Available patches:")
    for attr in sorted(discovered):
        print(f" {attr}")
def main() -> None:
    """Restore a saved kernel checkpoint, then apply the requested patches.

    Flow:
      1. Resolve the VM and restore directories and the kernelcache path.
      2. Copy the --base-patch checkpoint back over the kernelcache.
      3. Resolve each requested patch name on KernelJBPatcher first, then
         KernelPatcher, and invoke the matching methods.
      4. Splice every recorded (offset, bytes) patch into the image and
         save it back in its original container format.
    """
    args = parse_args()
    vm_dir = os.path.abspath(args.vm_dir)
    if not os.path.isdir(vm_dir):
        print(f"[-] Not a directory: {vm_dir}")
        sys.exit(1)
    restore_dir = find_restore_dir(vm_dir)
    if not restore_dir:
        print(f"[-] No *Restore* directory found in {vm_dir}")
        sys.exit(1)
    kernel_path = find_file(restore_dir, ["kernelcache.research.vphone600"], "kernelcache")
    checkpoint_path = resolve_checkpoint(kernel_path, args.base_patch)
    # Reset the kernel to the pristine checkpoint first, so repeated runs
    # never stack patches on top of a previously patched image.
    shutil.copy2(checkpoint_path, kernel_path)
    print(f"[*] Restored checkpoint: {checkpoint_path}")
    print(f"[*] Target kernel: {kernel_path}")
    im4p, data, was_im4p, original_raw = load_firmware(kernel_path)
    fmt = "IM4P" if was_im4p else "raw"
    print(f"[*] Loaded: {fmt}, {len(data)} bytes")
    base_patcher = KernelPatcher(data)
    jb_patcher = KernelJBPatcher(data)
    # Resolve each name to a bound method; the JB patcher wins when both
    # patchers expose the same name. Unknown names abort with a listing.
    selected = []
    for patch_name in args.patch_names:
        method = getattr(jb_patcher, patch_name, None)
        if callable(method):
            selected.append(("jb", patch_name, method))
            continue
        method = getattr(base_patcher, patch_name, None)
        if callable(method):
            selected.append(("base", patch_name, method))
            continue
        print(f"[-] Unknown patch: {patch_name}")
        list_available_methods(base_patcher, jb_patcher)
        sys.exit(1)
    print(f"[*] Applying {len(selected)} method(s)...")
    for scope, patch_name, method in selected:
        print(f" - {patch_name} [{scope}]")
        method()
    # The patcher methods record (offset, bytes, ...) tuples in `.patches`;
    # write each one into the image via slice assignment.
    # NOTE(review): assumes `data` is a bytearray (mutable) — confirm what
    # load_firmware returns.
    applied = 0
    for off, patch_bytes, _ in base_patcher.patches:
        data[off : off + len(patch_bytes)] = patch_bytes
        applied += 1
    for off, patch_bytes, _ in jb_patcher.patches:
        data[off : off + len(patch_bytes)] = patch_bytes
        applied += 1
    print(f"[+] Applied low-level patches: {applied}")
    save_firmware(kernel_path, im4p, data, was_im4p, original_raw)
    print(f"[+] Saved: {kernel_path}")
# Entry point when executed as a script.
if __name__ == "__main__":
    main()

362
scripts/testing_ramdisk_build.py Executable file
View File

@@ -0,0 +1,362 @@
#!/usr/bin/env python3
"""
testing_ramdisk_build.py — Build a minimal signed boot chain for testing.
Packs firmware components (iBSS, iBEC, SPTM, DeviceTree, SEP, TXM,
kernelcache) with the stock ramdisk into signed IMG4 files. No SSH
tools or CFW — just the base boot chain for quick patch verification.
Usage:
python3 testing_ramdisk_build.py [vm_directory]
Prerequisites:
pip install pyimg4
Run fw_patch.py first to patch boot-chain components.
"""
import glob
import gzip
import os
import plistlib
import shutil
import subprocess
import sys
_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
if _SCRIPT_DIR not in sys.path:
sys.path.insert(0, _SCRIPT_DIR)
from pyimg4 import IM4M, IM4P, IMG4
from fw_patch import (
load_firmware,
_save_im4p_with_payp,
find_restore_dir,
find_file,
)
# ══════════════════════════════════════════════════════════════════
# Configuration
# ══════════════════════════════════════════════════════════════════
# Directory names created under the VM dir: signed outputs and scratch space.
OUTPUT_DIR = "TestingRamdisk"
TEMP_DIR = "testing_ramdisk_temp"
# IM4P fourccs for restore mode
TXM_FOURCC = "trxm"  # restore-mode TXM payload tag
KERNEL_FOURCC = "rkrn"  # restore-mode kernelcache payload tag
# ══════════════════════════════════════════════════════════════════
# SHSH / signing helpers
# ══════════════════════════════════════════════════════════════════
def find_shsh(shsh_dir):
    """Return the first SHSH blob in shsh_dir (*.shsh before *.shsh2), or None."""
    hits = []
    for pattern in ("*.shsh", "*.shsh2"):
        hits = sorted(glob.glob(os.path.join(shsh_dir, pattern)))
        if hits:
            break
    return hits[0] if hits else None
def extract_im4m(shsh_path, im4m_path):
    """Extract the IM4M manifest from an SHSH blob into im4m_path.

    Transparently decompresses gzip-wrapped blobs (magic bytes 0x1f 0x8b),
    writes the decompressed blob to a temporary sidecar file, and shells out
    to the `pyimg4` CLI to pull the IM4M. The temp file is removed even when
    extraction fails.

    Files are read/written via context managers so handles are closed
    promptly (the original leaked both file objects).

    Raises:
        subprocess.CalledProcessError: if the pyimg4 extraction fails.
    """
    with open(shsh_path, "rb") as f:
        raw = f.read()
    if raw[:2] == b"\x1f\x8b":
        raw = gzip.decompress(raw)
    tmp = shsh_path + ".tmp"
    try:
        with open(tmp, "wb") as f:
            f.write(raw)
        subprocess.run(
            ["pyimg4", "im4m", "extract", "-i", tmp, "-o", im4m_path],
            check=True,
            capture_output=True,
        )
    finally:
        if os.path.exists(tmp):
            os.remove(tmp)
def sign_img4(im4p_path, img4_path, im4m_path, tag=None):
    """Create a signed IMG4 at img4_path from an IM4P payload + IM4M manifest.

    Uses the pyimg4 Python API rather than the CLI. All files are opened via
    context managers so handles are closed promptly (the original leaked the
    two read handles).

    Args:
        im4p_path: path to the IM4P payload file.
        img4_path: destination path for the signed IMG4.
        im4m_path: path to the IM4M manifest extracted from the SHSH blob.
        tag: optional fourcc override applied to the payload before signing
             (callers pass e.g. "rdtr", "rsep" for restore-mode components).
    """
    with open(im4p_path, "rb") as f:
        im4p = IM4P(f.read())
    if tag:
        im4p.fourcc = tag
    with open(im4m_path, "rb") as f:
        im4m = IM4M(f.read())
    img4 = IMG4(im4p=im4p, im4m=im4m)
    with open(img4_path, "wb") as f:
        f.write(img4.output())
# ══════════════════════════════════════════════════════════════════
# Firmware extraction
# ══════════════════════════════════════════════════════════════════
def extract_to_raw(src_path, raw_path):
    """Extract IM4P payload to .raw file. Returns (im4p_obj, data, original_raw)."""
    im4p_obj, payload, _was_im4p, original_raw = load_firmware(src_path)
    with open(raw_path, "wb") as out:
        out.write(bytes(payload))
    return im4p_obj, payload, original_raw
def create_im4p_uncompressed(raw_data, fourcc, description, output_path):
    """Create uncompressed IM4P from raw data."""
    packed = IM4P(
        fourcc=fourcc,
        description=description,
        payload=bytes(raw_data),
    ).output()
    with open(output_path, "wb") as out:
        out.write(packed)
# ══════════════════════════════════════════════════════════════════
# Main
# ══════════════════════════════════════════════════════════════════
def main():
    """Build the minimal signed testing boot chain into <vm_dir>/TestingRamdisk.

    Signs each boot-chain component (iBSS, iBEC, SPTM, DeviceTree, SEP, TXM,
    kernelcache) with the IM4M extracted from the SHSH blob, builds a
    trustcache from the stock ramdisk, and signs the ramdisk unmodified.
    Requires the `pyimg4` and `trustcache` CLIs and sudo for hdiutil.
    Exits non-zero on any missing prerequisite (SHSH, restore dir, tools).
    """
    vm_dir = os.path.abspath(sys.argv[1] if len(sys.argv) > 1 else os.getcwd())
    if not os.path.isdir(vm_dir):
        print(f"[-] Not a directory: {vm_dir}")
        sys.exit(1)
    # Find SHSH
    shsh_dir = os.path.join(vm_dir, "shsh")
    shsh_path = find_shsh(shsh_dir)
    if not shsh_path:
        print(f"[-] No SHSH blob found in {shsh_dir}/")
        print(" Place your .shsh file in the shsh/ directory.")
        sys.exit(1)
    # Find restore directory
    restore_dir = find_restore_dir(vm_dir)
    if not restore_dir:
        print(f"[-] No *Restore* directory found in {vm_dir}")
        sys.exit(1)
    # Create temp and output directories (wiped each run for a clean build)
    temp_dir = os.path.join(vm_dir, TEMP_DIR)
    output_dir = os.path.join(vm_dir, OUTPUT_DIR)
    for d in (temp_dir, output_dir):
        if os.path.exists(d):
            shutil.rmtree(d)
        os.makedirs(d)
    print(f"[*] Testing ramdisk — boot chain only (no SSH, no CFW)")
    print(f"[*] VM directory: {vm_dir}")
    print(f"[*] Restore directory: {restore_dir}")
    print(f"[*] SHSH blob: {shsh_path}")
    # Extract IM4M from SHSH
    im4m_path = os.path.join(temp_dir, "vphone.im4m")
    print(f"\n[*] Extracting IM4M from SHSH...")
    extract_im4m(shsh_path, im4m_path)
    # ── 1. iBSS (already patched — extract & sign) ───────────────
    print(f"\n{'=' * 60}")
    print(f" 1. iBSS (already patched — extract & sign)")
    print(f"{'=' * 60}")
    ibss_src = find_file(
        restore_dir,
        ["Firmware/dfu/iBSS.vresearch101.RELEASE.im4p"],
        "iBSS",
    )
    ibss_raw = os.path.join(temp_dir, "iBSS.raw")
    ibss_im4p = os.path.join(temp_dir, "iBSS.im4p")
    # Repack the payload uncompressed before signing.
    im4p_obj, data, _ = extract_to_raw(ibss_src, ibss_raw)
    create_im4p_uncompressed(data, im4p_obj.fourcc, im4p_obj.description, ibss_im4p)
    sign_img4(
        ibss_im4p,
        os.path.join(output_dir, "iBSS.vresearch101.RELEASE.img4"),
        im4m_path,
    )
    print(f" [+] iBSS.vresearch101.RELEASE.img4")
    # ── 2. iBEC (already patched — sign as-is, no boot-args change)
    print(f"\n{'=' * 60}")
    print(f" 2. iBEC (already patched — sign as-is)")
    print(f"{'=' * 60}")
    ibec_src = find_file(
        restore_dir,
        ["Firmware/dfu/iBEC.vresearch101.RELEASE.im4p"],
        "iBEC",
    )
    ibec_raw = os.path.join(temp_dir, "iBEC.raw")
    ibec_im4p = os.path.join(temp_dir, "iBEC.im4p")
    im4p_obj, data, _ = extract_to_raw(ibec_src, ibec_raw)
    create_im4p_uncompressed(data, im4p_obj.fourcc, im4p_obj.description, ibec_im4p)
    sign_img4(
        ibec_im4p,
        os.path.join(output_dir, "iBEC.vresearch101.RELEASE.img4"),
        im4m_path,
    )
    print(f" [+] iBEC.vresearch101.RELEASE.img4")
    # ── 3. SPTM (sign only) ─────────────────────────────────────
    print(f"\n{'=' * 60}")
    print(f" 3. SPTM (sign only)")
    print(f"{'=' * 60}")
    sptm_src = find_file(
        restore_dir,
        ["Firmware/sptm.vresearch1.release.im4p"],
        "SPTM",
    )
    sign_img4(
        sptm_src,
        os.path.join(output_dir, "sptm.vresearch1.release.img4"),
        im4m_path,
        tag="sptm",
    )
    print(f" [+] sptm.vresearch1.release.img4")
    # ── 4. DeviceTree (sign only) ────────────────────────────────
    print(f"\n{'=' * 60}")
    print(f" 4. DeviceTree (sign only)")
    print(f"{'=' * 60}")
    dt_src = find_file(
        restore_dir,
        ["Firmware/all_flash/DeviceTree.vphone600ap.im4p"],
        "DeviceTree",
    )
    # Re-tagged 'rdtr' — NOTE(review): presumably the restore-mode device
    # tree fourcc; confirm against the boot chain docs.
    sign_img4(
        dt_src,
        os.path.join(output_dir, "DeviceTree.vphone600ap.img4"),
        im4m_path,
        tag="rdtr",
    )
    print(f" [+] DeviceTree.vphone600ap.img4")
    # ── 5. SEP (sign only) ───────────────────────────────────────
    print(f"\n{'=' * 60}")
    print(f" 5. SEP (sign only)")
    print(f"{'=' * 60}")
    sep_src = find_file(
        restore_dir,
        ["Firmware/all_flash/sep-firmware.vresearch101.RELEASE.im4p"],
        "SEP",
    )
    sign_img4(
        sep_src,
        os.path.join(output_dir, "sep-firmware.vresearch101.RELEASE.img4"),
        im4m_path,
        tag="rsep",
    )
    print(f" [+] sep-firmware.vresearch101.RELEASE.img4")
    # ── 6. TXM (already patched — repack & sign) ─────────────────
    print(f"\n{'=' * 60}")
    print(f" 6. TXM (already patched — repack & sign)")
    print(f"{'=' * 60}")
    txm_src = find_file(
        restore_dir,
        ["Firmware/txm.iphoneos.research.im4p"],
        "TXM",
    )
    txm_raw = os.path.join(temp_dir, "txm.raw")
    im4p_obj, data, original_raw = extract_to_raw(txm_src, txm_raw)
    txm_im4p = os.path.join(temp_dir, "txm.im4p")
    _save_im4p_with_payp(txm_im4p, TXM_FOURCC, data, original_raw)
    sign_img4(txm_im4p, os.path.join(output_dir, "txm.img4"), im4m_path)
    print(f" [+] txm.img4")
    # ── 7. Kernelcache (already patched — repack as rkrn) ────────
    print(f"\n{'=' * 60}")
    print(f" 7. Kernelcache (already patched — repack as rkrn)")
    print(f"{'=' * 60}")
    kc_src = find_file(
        restore_dir,
        ["kernelcache.research.vphone600"],
        "kernelcache",
    )
    kc_raw = os.path.join(temp_dir, "kcache.raw")
    im4p_obj, data, original_raw = extract_to_raw(kc_src, kc_raw)
    print(f" format: IM4P, {len(data)} bytes")
    kc_im4p = os.path.join(temp_dir, "krnl.im4p")
    _save_im4p_with_payp(kc_im4p, KERNEL_FOURCC, data, original_raw)
    sign_img4(kc_im4p, os.path.join(output_dir, "krnl.img4"), im4m_path)
    print(f" [+] krnl.img4")
    # ── 8. Base ramdisk + trustcache ─────────────────────────────
    print(f"\n{'=' * 60}")
    print(f" 8. Base ramdisk + trustcache")
    print(f"{'=' * 60}")
    tc_bin = shutil.which("trustcache")
    if not tc_bin:
        print("[-] trustcache not found. Run: make setup_tools")
        sys.exit(1)
    # Read RestoreRamDisk path from BuildManifest
    bm_path = os.path.join(restore_dir, "BuildManifest.plist")
    with open(bm_path, "rb") as f:
        bm = plistlib.load(f)
    ramdisk_rel = bm["BuildIdentities"][0]["Manifest"]["RestoreRamDisk"]["Info"]["Path"]
    ramdisk_src = os.path.join(restore_dir, ramdisk_rel)
    # Extract base ramdisk DMG
    ramdisk_raw = os.path.join(temp_dir, "ramdisk.raw.dmg")
    subprocess.run(
        ["pyimg4", "im4p", "extract", "-i", ramdisk_src, "-o", ramdisk_raw],
        check=True,
        capture_output=True,
    )
    # Mount base ramdisk, build trustcache from its contents
    mountpoint = os.path.join(vm_dir, "testing_ramdisk_mnt")
    os.makedirs(mountpoint, exist_ok=True)
    try:
        subprocess.run(
            ["sudo", "hdiutil", "attach", "-mountpoint", mountpoint,
             ramdisk_raw, "-owners", "off"],
            check=True,
        )
        print(" Building trustcache from base ramdisk...")
        tc_raw = os.path.join(temp_dir, "ramdisk.tc")
        tc_im4p = os.path.join(temp_dir, "trustcache.im4p")
        subprocess.run([tc_bin, "create", tc_raw, mountpoint], check=True, capture_output=True)
        subprocess.run(
            ["pyimg4", "im4p", "create", "-i", tc_raw, "-o", tc_im4p, "-f", "rtsc"],
            check=True,
            capture_output=True,
        )
        sign_img4(tc_im4p, os.path.join(output_dir, "trustcache.img4"), im4m_path)
        print(f" [+] trustcache.img4")
    finally:
        # Always detach the mounted DMG, even if trustcache creation failed.
        subprocess.run(
            ["sudo", "hdiutil", "detach", "-force", mountpoint], capture_output=True
        )
    # Sign base ramdisk as-is
    rd_im4p = os.path.join(temp_dir, "ramdisk.im4p")
    subprocess.run(
        ["pyimg4", "im4p", "create", "-i", ramdisk_raw, "-o", rd_im4p, "-f", "rdsk"],
        check=True,
        capture_output=True,
    )
    sign_img4(rd_im4p, os.path.join(output_dir, "ramdisk.img4"), im4m_path)
    print(f" [+] ramdisk.img4 (base, unmodified)")
    # ── Cleanup ──────────────────────────────────────────────────
    print(f"\n[*] Cleaning up {TEMP_DIR}/...")
    shutil.rmtree(temp_dir, ignore_errors=True)
    # ── Summary ──────────────────────────────────────────────────
    print(f"\n{'=' * 60}")
    print(f" Testing ramdisk build complete!")
    print(f" Output: {output_dir}/")
    print(f" Note: boot chain only — no SSH, no CFW")
    print(f"{'=' * 60}")
    for f in sorted(os.listdir(output_dir)):
        size = os.path.getsize(os.path.join(output_dir, f))
        print(f" {f:45s} {size:>10,} bytes")
# Entry point when executed as a script.
if __name__ == "__main__":
    main()

69
scripts/testing_ramdisk_send.sh Executable file
View File

@@ -0,0 +1,69 @@
#!/usr/bin/env zsh
# testing_ramdisk_send.sh — Send testing boot chain to device via irecovery.
#
# Usage: ./testing_ramdisk_send.sh [testing_ramdisk_dir]
#
# Expects device in DFU mode. Loads iBSS/iBEC, then boots with
# SPTM, TXM, trustcache, ramdisk, device tree, SEP, and kernel.
# Boot chain only — no SSH, no CFW.
#
# Shebang uses `env zsh` for consistency with the other testing scripts
# (testing_checkpoint_save.sh / testing_exec.sh) and to work on systems
# where zsh is not installed at /bin/zsh.
set -euo pipefail

# IRECOVERY may be overridden via env to point at a local build.
IRECOVERY="${IRECOVERY:-irecovery}"
RAMDISK_DIR="${1:-TestingRamdisk}"

if [[ ! -d "$RAMDISK_DIR" ]]; then
    echo "[-] Testing ramdisk directory not found: $RAMDISK_DIR"
    echo " Run 'make testing_ramdisk_build' first."
    exit 1
fi

echo "[*] Sending testing boot chain from $RAMDISK_DIR ..."
echo " (boot chain only — no SSH, no CFW)"

# 1. Load iBSS + iBEC (DFU → recovery)
echo " [1/8] Loading iBSS..."
"$IRECOVERY" -f "$RAMDISK_DIR/iBSS.vresearch101.RELEASE.img4"
echo " [2/8] Loading iBEC..."
"$IRECOVERY" -f "$RAMDISK_DIR/iBEC.vresearch101.RELEASE.img4"
"$IRECOVERY" -c go
sleep 1

# 2. Load SPTM
echo " [3/8] Loading SPTM..."
"$IRECOVERY" -f "$RAMDISK_DIR/sptm.vresearch1.release.img4"
"$IRECOVERY" -c firmware

# 3. Load TXM
echo " [4/8] Loading TXM..."
"$IRECOVERY" -f "$RAMDISK_DIR/txm.img4"
"$IRECOVERY" -c firmware

# 4. Load trustcache
echo " [5/8] Loading trustcache..."
"$IRECOVERY" -f "$RAMDISK_DIR/trustcache.img4"
"$IRECOVERY" -c firmware

# 5. Load ramdisk
echo " [6/8] Loading ramdisk..."
"$IRECOVERY" -f "$RAMDISK_DIR/ramdisk.img4"
sleep 2
"$IRECOVERY" -c ramdisk

# 6. Load device tree
echo " [7/8] Loading device tree..."
"$IRECOVERY" -f "$RAMDISK_DIR/DeviceTree.vphone600ap.img4"
"$IRECOVERY" -c devicetree

# 7. Load SEP
echo " [8/8] Loading SEP..."
"$IRECOVERY" -f "$RAMDISK_DIR/sep-firmware.vresearch101.RELEASE.img4"
"$IRECOVERY" -c firmware

# 8. Load kernel and boot
echo " [*] Booting kernel..."
"$IRECOVERY" -f "$RAMDISK_DIR/krnl.img4"
"$IRECOVERY" -c bootx
echo "[+] Boot sequence sent."