refactor: split patchers into base/mixin modules and format scripts

This commit is contained in:
Lakr
2026-03-04 15:19:17 +08:00
parent e695526840
commit 62b1564e20
64 changed files with 5579 additions and 5786 deletions

444
AGENTS.md
View File

@@ -20,9 +20,15 @@ Virtual iPhone boot tool using Apple's Virtualization.framework with PCC researc
- If blocked or waiting on user input, write the exact blocker and next action in `/TODO.md`.
- If `/TODO.md` does not exist, continue existing work until complete. If it exists, follow its instructions.
## Project Overview
## Firmware Variants
CLI tool that boots virtual iPhones (PV=3) via Apple's Virtualization.framework, targeting Private Cloud Compute (PCC) research VMs. Used for iOS security research — firmware patching, boot chain modification, and runtime instrumentation.
| Variant | Boot Chain | CFW | Make Targets |
| ------------------- | :--------: | :-------: | ---------------------------------- |
| **Regular** | 38 patches | 10 phases | `fw_patch` + `cfw_install` |
| **Development** | 47 patches | 12 phases | `fw_patch_dev` + `cfw_install_dev` |
| **Jailbreak (WIP)** | 84 patches | 14 phases | `fw_patch_jb` + `cfw_install_jb` |
See `research/` for detailed firmware pipeline, component origins, patch breakdowns, and boot flow documentation.
## Architecture
@@ -50,241 +56,69 @@ sources/
├── VPhoneWindowController.swift # @MainActor VM window management + toolbar
├── VPhoneKeyHelper.swift # Keyboard/hardware key event dispatch to VM
├── VPhoneLocationProvider.swift # CoreLocation → guest forwarding over vsock
├── VPhoneScreenRecorder.swift # VM screen recording to file
│ # Menu bar (extensions on VPhoneMenuController)
├── VPhoneMenuController.swift # Menu bar controller (builds Keys, Type, Location, Connect)
├── VPhoneMenuKeys.swift # Keys menu — home, power, volume, spotlight buttons
├── VPhoneMenuController.swift # Menu bar controller
├── VPhoneMenuKeys.swift # Keys menu — home, power, volume, spotlight
├── VPhoneMenuType.swift # Type menu — paste ASCII text to guest
├── VPhoneMenuLocation.swift # Location menu — host location sync toggle
├── VPhoneMenuConnect.swift # Connect menu — devmode, ping, version, file browser
├── VPhoneMenuInstall.swift # Install menu — IPA installation to guest
├── VPhoneMenuRecord.swift # Record menu — screen recording controls
│ # IPA installation
├── VPhoneIPAInstaller.swift # IPA extraction, signing, and installation
├── VPhoneSigner.swift # Mach-O binary signing utilities
│ # File browser (SwiftUI)
├── VPhoneFileWindowController.swift # File browser window (NSHostingController)
├── VPhoneFileBrowserView.swift # SwiftUI file browser with search + drag-drop
├── VPhoneFileBrowserModel.swift # @Observable file browser state + transfers
└── VPhoneRemoteFile.swift # Remote file data model (path, size, permissions)
└── VPhoneRemoteFile.swift # Remote file data model
scripts/
├── vphoned/ # Guest daemon (Objective-C, runs inside iOS VM)
│ ├── vphoned.m # Main — vsock listener, message dispatch, auto-update
│ ├── vphoned_protocol.{h,m} # Length-prefixed JSON framing (shared with host)
│ ├── vphoned_hid.{h,m} # HID event injection (IOHIDEvent)
│ ├── vphoned_devmode.{h,m} # Developer Mode query/enable via XPC
│ ├── vphoned_location.{h,m} # CLLocationManager spoofing
│ ├── vphoned_files.{h,m} # File operations (list, get, put, mkdir, delete, rename)
│ ├── vphoned.plist # LaunchDaemon plist
│ ├── entitlements.plist # Guest entitlements
│ └── signcert.p12 # Signing certificate for re-signing
├── patchers/ # Python patcher package
│ ├── iboot.py # Dynamic iBoot patcher (iBSS/iBEC/LLB)
│ ├── iboot_jb.py # JB extension iBoot patcher (nonce skip)
│ ├── kernel.py # Dynamic kernel patcher (25 patches)
│ ├── kernel_jb.py # JB extension kernel patcher (~34 patches)
│ ├── txm.py # Dynamic TXM patcher
│ ├── txm_jb.py # JB extension TXM patcher (~13 patches)
│ └── cfw.py # CFW binary patcher (base + JB jetsam)
├── resources/ # Resource archives
│   ├── cfw_input.tar.zst
│   ├── cfw_jb_input.tar.zst # JB: procursus bootstrap + Sileo
│ └── ramdisk_input.tar.zst
├── fw_prepare.sh # Downloads IPSWs, merges cloudOS into iPhone
├── fw_manifest.py # Generates hybrid BuildManifest.plist & Restore.plist
├── fw_patch.py # Patches 6 boot-chain components (41+ modifications)
├── fw_patch_jb.py # Runs fw_patch + JB extension patches (iBSS/TXM/kernel)
├── ramdisk_build.py # Builds SSH ramdisk with trustcache
├── ramdisk_send.sh # Sends ramdisk to device via irecovery
├── cfw_install.sh # Installs custom firmware to VM disk
├── cfw_install_jb.sh # Wrapper: cfw_install with JB phases enabled
├── vm_create.sh # Creates VM directory (disk, SEP storage, ROMs)
├── setup_venv.sh # Creates Python venv with native keystone dylib
└── setup_libimobiledevice.sh # Builds libimobiledevice toolchain from source
├── vphoned/ # Guest daemon (ObjC, runs inside iOS VM over vsock)
├── patchers/ # Python patcher modules
│ ├── iboot.py # iBoot patcher (iBSS/iBEC/LLB)
│ ├── iboot_jb.py # JB: iBoot nonce skip
│ ├── kernel.py # Kernel patcher (25 patches)
│ ├── kernel_jb.py # JB: kernel patches (~34)
│ ├── txm.py # TXM patcher
│ ├── txm_dev.py # Dev: TXM entitlements/debugger/dev mode
│ ├── txm_jb.py # JB: TXM CS bypass (~13)
│ └── cfw.py # CFW binary patcher
├── resources/ # Resource archives (git submodule)
├── patches/ # Build-time patches (libirecovery)
├── fw_prepare.sh # Download IPSWs, merge cloudOS into iPhone
├── fw_manifest.py # Generate hybrid BuildManifest/Restore plists
├── fw_patch.py # Patch boot chain (regular)
├── fw_patch_dev.py # Regular + dev TXM patches
├── fw_patch_jb.py # Regular + JB extensions
├── ramdisk_build.py # Build SSH ramdisk with trustcache
├── ramdisk_send.sh # Send ramdisk to device via irecovery
├── cfw_install.sh # Install CFW (regular)
├── cfw_install_dev.sh # Regular + rpcserver daemon
├── cfw_install_jb.sh # Regular + jetsam fix + procursus
├── vm_create.sh # Create VM directory
├── setup_machine.sh # Full automation (setup → first boot)
├── setup_tools.sh # Install deps, build toolchain, create venv
├── setup_venv.sh # Create Python venv
└── setup_libimobiledevice.sh # Build libimobiledevice from source
research/
├── patch_comparison_all_variants.md # Regular/Dev/JB patch comparison table
└── ... # Component analysis and architecture docs
research/ # Detailed firmware/patch documentation
```
### Key Patterns
- **Private API access:** Private Virtualization.framework APIs are called via the [Dynamic](https://github.com/mhdhejazi/Dynamic) library (runtime method dispatch from pure Swift). No ObjC bridge needed.
- **App lifecycle:** Explicit `main.swift` creates `NSApplication` + `VPhoneAppDelegate`. CLI args parsed before the run loop starts. AppDelegate drives VM start, window, and shutdown.
- **Configuration:** CLI options parsed via `ArgumentParser`, converted to `VPhoneVirtualMachine.Options` struct, then used to build `VZVirtualMachineConfiguration`.
- **Error handling:** `VPhoneError` enum with `CustomStringConvertible` for user-facing messages.
- **Window management:** `VPhoneWindowController` wraps `NSWindow` + `VZVirtualMachineView`. Window size derived from configurable screen dimensions and scale factor. Touch input translated from mouse events to multi-touch via `VPhoneVirtualMachineView`.
- **Guest daemon (vphoned):** ObjC daemon running inside the iOS VM as a LaunchDaemon. Communicates with host over vsock port 1337 using length-prefixed JSON (`[uint32 BE length][UTF-8 JSON]`). Handles HID injection, developer mode, location spoofing, and file operations. Host side is `VPhoneControl` which auto-reconnects and supports binary auto-update on connect.
- **Control protocol:** All commands use async request-response via `VPhoneControl.sendRequest()` with pending request tracking. Menu actions (`VPhoneMenuConnect`) await responses and show results as `NSAlert` sheets on the VM window.
- **Menu system:** `VPhoneMenuController` owns the menu bar, built from extensions in separate files per menu (Keys, Type, Location, Connect). Each extension has its own `build*Menu()` method.
- **File browser:** SwiftUI-based (`VPhoneFileBrowserView` + `VPhoneFileBrowserModel`) hosted in a separate `NSWindow` via `NSHostingController`. Supports search, sort, upload/download, drag-drop. File operations go through `VPhoneControl` async APIs.
- **Location sync:** `VPhoneLocationProvider` wraps `CLLocationManager`, forwards host Mac's GPS coordinates to the guest over vsock when toggled from the Location menu.
---
## Firmware Assembly Pipeline
The firmware is a **PCC/iPhone hybrid** — PCC boot infrastructure wrapping iPhone iOS userland.
### Pipeline Stages
```
1. make fw_prepare Download iPhone + cloudOS IPSWs, merge, generate hybrid plists
2. make fw_patch Patch 6 boot-chain components for signature bypass + debug
OR make fw_patch_jb Base patches + JB extensions (iBSS nonce, TXM CS, kernel JB)
3. make ramdisk_build Build SSH ramdisk from SHSH blob, inject tools, sign with IM4M
4. make vm_new Create VM directory (sparse disk, SEP storage, copy ROMs)
5. make boot_dfu Boot VM in DFU mode
6. make ramdisk_send Load boot chain + ramdisk via irecovery
7. make cfw_install Mount Cryptex, patch userland, install base tools
OR make cfw_install_jb Base CFW + JB phases (jetsam patch, procursus bootstrap)
```
### Component Origins
The firmware merges two Apple IPSWs:
- **iPhone IPSW:** `iPhone17,3_26.1_23B85_Restore.ipsw` (d47ap)
- **cloudOS IPSW:** PCC vresearch101ap IPSW (CDN hash URL)
`fw_prepare.sh` extracts both, then copies cloudOS boot chain into the
iPhone restore directory (`kernelcache.*`, `Firmware/{agx,all_flash,ane,dfu,pmp}/*`,
`Firmware/*.im4p`). The cloudOS extract is deleted after merge.
#### Boot Chain — from PCC (cloudOS / vresearch101ap)
| Component | File | Patched | Patch Purpose |
| ------------ | ----------------------------------------------------------- | -------- | --------------------------------------------------- |
| AVPBooter | `AVPBooter.vresearch1.bin` | Yes (1) | DGST signature validation bypass |
| LLB | `Firmware/all_flash/LLB.vresearch101.RELEASE.im4p` | Yes (6) | Serial + image4 bypass + boot-args + rootfs + panic |
| iBSS | `Firmware/dfu/iBSS.vresearch101.RELEASE.im4p` | Yes (2) | Serial labels + image4 callback bypass |
| iBEC | `Firmware/dfu/iBEC.vresearch101.RELEASE.im4p` | Yes (3) | Serial + image4 bypass + boot-args |
| SPTM | `Firmware/all_flash/sptm.vresearch1.release.im4p` | No | — |
| TXM | `Firmware/txm.iphoneos.research.im4p` | Yes (1) | Trustcache validation bypass |
| SEP Firmware | `Firmware/all_flash/sep-firmware.vresearch101.RELEASE.im4p` | No | — |
| DeviceTree | `Firmware/all_flash/DeviceTree.vphone600ap.im4p` | No | — |
| KernelCache | `kernelcache.release.vphone600` | Yes (25) | APFS, MAC, debugger, launch constraints, etc. |
| GPU/ANE/PMP | `Firmware/{agx,ane,pmp}/*` | No | — |
> TXM filename says "iphoneos" but is copied from cloudOS IPSW (`fw_prepare.sh` line 81).
#### OS / Filesystem — from iPhone (iPhone17,3)
| Component | Notes |
| -------------------------------- | ---------------------- |
| OS | iPhone OS image |
| SystemVolume | System partition |
| StaticTrustCache | Static trust cache |
| Ap,SystemVolumeCanonicalMetadata | System volume metadata |
> Cryptex1 components (SystemOS/AppOS DMGs) are **not** included in the BuildManifest.
> They are only needed by `cfw_install.sh` which reads paths from the original iPhone manifest separately.
### Build Identity
`fw_manifest.py` generates a **single** DFU erase-install identity (20 components).
The VM always boots via DFU restore, so only one identity is needed.
| Variant | Boot Chain | Ramdisk |
| -------------------------------------------- | -------------------------------------------------- | --------- |
| `Darwin Cloud Customer Erase Install (IPSW)` | PCC RELEASE (LLB/iBSS/iBEC) + RESEARCH (iBoot/TXM) | PCC erase |
idevicerestore selects this identity by partial-matching `Info.Variant` against
`"Erase Install (IPSW)"` while excluding `"Research"`.
### Patched Components Summary
**Boot chain patches** (`fw_patch.py`) — all 6 targets from **PCC**:
| Component | Patches | Technique |
| ----------- | ------- | ------------------------------------------------------------------------------- |
| AVPBooter | 1 | `mov x0, #0` (DGST bypass) |
| iBSS | 2 | Dynamic via `patchers/iboot.py` (string anchors, instruction patterns) |
| iBEC | 3 | Dynamic via `patchers/iboot.py` (string anchors, instruction patterns) |
| LLB | 6 | Dynamic via `patchers/iboot.py` (string anchors, instruction patterns) |
| TXM | 1 | Dynamic via `patchers/txm.py` (trustcache hash lookup bypass) |
| KernelCache | 25 | Dynamic via `patchers/kernel.py` (string anchors, ADRP+ADD xrefs, BL frequency) |
**JB extension patches** (`fw_patch_jb.py`) — runs base patches first, then adds:
| Component | JB Patches | Technique |
| ----------- | ---------- | ----------------------------------------------------------------------------------- |
| iBSS | +1 | `patchers/iboot_jb.py` (skip nonce generation) |
| TXM | +13 | `patchers/txm_jb.py` (CS validation bypass, get-task-allow, debugger ent, dev mode) |
| KernelCache | +34 | `patchers/kernel_jb.py` (trustcache, execve, sandbox, task/VM, kcall10) |
**CFW patches** (`patchers/cfw.py` / `cfw_install.sh`) — targets from **iPhone** Cryptex SystemOS:
| Binary | Technique | Purpose | Mode |
| -------------------- | ---------------------------------------------------- | --------------------------------------------- | ---- |
| seputil | String patch (`/%s.gl` → `/AA.gl`) | Gigalocker UUID fix | Base |
| launchd_cache_loader | NOP (disassembly-anchored) | Bypass cache validation | Base |
| mobileactivationd | Return true (disassembly-anchored) | Skip activation check | Base |
| launchd.plist | Plist injection | Add bash/dropbear/trollvnc daemons | Base |
| launchd | Branch (skip jetsam guard) + LC_LOAD_DYLIB injection | Prevent jetsam panic + load launchdhook.dylib | JB |
**JB install phases** (`cfw_install_jb.sh` = `cfw_install.sh` with `CFW_JB_MODE=1`):
| Phase | Action |
| ----- | ---------------------------------------------------------------------------------------------------- |
| JB-1 | Patch `/mnt1/sbin/launchd`: inject `launchdhook.dylib` LC_LOAD_DYLIB + jetsam guard bypass |
| JB-2 | Install procursus bootstrap to `/mnt5/<hash>/jb-vphone/procursus` |
| JB-3 | Deploy BaseBin hooks (`systemhook.dylib`, `launchdhook.dylib`, `libellekit.dylib`) to `/mnt1/cores/` |
### Boot Flow
**Base** (`fw_patch` + `cfw_install`):
```
AVPBooter (ROM, PCC)
→ LLB (PCC, patched)
→ iBSS (PCC, patched, DFU)
→ iBEC (PCC, patched, DFU)
→ SPTM + TXM (PCC, TXM patched)
→ KernelCache (PCC, 25 patches)
→ Ramdisk (PCC or iPhone, SSH-injected)
→ iOS userland (iPhone, CFW-patched)
```
**Jailbreak** (`fw_patch_jb` + `cfw_install_jb`):
```
AVPBooter (ROM, PCC)
→ LLB (PCC, patched)
→ iBSS (PCC, patched + nonce skip)
→ iBEC (PCC, patched, DFU)
→ SPTM + TXM (PCC, TXM patched + CS/ent/devmode bypass)
→ KernelCache (PCC, 25 base + ~34 JB patches)
→ Ramdisk (SSH-injected)
→ iOS userland (CFW + jetsam fix + procursus)
```
### Ramdisk Build (`ramdisk_build.py`)
1. Extract IM4M from SHSH blob
2. Process 8 components: iBSS, iBEC, SPTM, DeviceTree, SEP, TXM, KernelCache, Ramdisk+Trustcache
3. For ramdisk: extract base DMG → create 254 MB APFS volume → mount → inject SSH tools from `resources/ramdisk_input.tar.zst` → re-sign Mach-Os with ldid + signcert.p12 → build trustcache
4. Sign all components with IM4M manifest → output to `Ramdisk/` directory as IMG4 files
### CFW Installation (`cfw_install.sh`)
7 phases (+ 2 JB phases), safe to re-run (idempotent):
1. Decrypt/mount Cryptex SystemOS and AppOS DMGs (`ipsw` + `aea`)
2. Patch seputil (gigalocker UUID)
3. Install GPU driver (AppleParavirtGPUMetalIOGPUFamily)
4. Install iosbinpack64 (jailbreak tools)
5. Patch launchd_cache_loader (NOP cache validation)
6. Patch mobileactivationd (activation bypass)
7. Install LaunchDaemons (bash, dropbear SSH, trollvnc)
**JB-only phases** (enabled via `make cfw_install_jb` or `CFW_JB_MODE=1`):
- JB-1: Patch launchd jetsam guard (prevents jetsam panic on boot)
- JB-2: Install procursus bootstrap + optional Sileo to `/mnt5/<hash>/jb-vphone/`
- **Private API access:** Via [Dynamic](https://github.com/mhdhejazi/Dynamic) library (runtime method dispatch from pure Swift). No ObjC bridge.
- **App lifecycle:** `main.swift` creates `NSApplication` + `VPhoneAppDelegate`. CLI args parsed before run loop. AppDelegate drives VM start/window/shutdown.
- **Configuration:** `ArgumentParser` → `VPhoneVirtualMachine.Options` → `VZVirtualMachineConfiguration`.
- **Guest daemon (vphoned):** ObjC daemon inside iOS VM, vsock port 1337, length-prefixed JSON protocol. Host side is `VPhoneControl` with auto-reconnect.
- **Menu system:** `VPhoneMenuController` + per-menu extensions (Keys, Type, Location, Connect, Install, Record).
- **File browser:** SwiftUI (`VPhoneFileBrowserView` + `VPhoneFileBrowserModel`) in `NSHostingController`. Search, sort, upload/download, drag-drop via `VPhoneControl`.
- **IPA installation:** `VPhoneIPAInstaller` extracts + re-signs via `VPhoneSigner` + installs over vsock.
- **Screen recording:** `VPhoneScreenRecorder` captures VM display. Controls via Record menu.
---
@@ -297,175 +131,31 @@ AVPBooter (ROM, PCC)
- **Sections:** Use `// MARK: -` to organize code within files.
- **Access control:** Default (internal). Only mark `private` when needed for clarity.
- **Concurrency:** `@MainActor` for VM and UI classes. `nonisolated` delegate methods use `MainActor.isolated {}` to hop back safely.
- **Naming:** Types are `VPhone`-prefixed (`VPhoneVirtualMachine`, `VPhoneWindowController`). Match Apple framework conventions.
- **Naming:** Types are `VPhone`-prefixed. Match Apple framework conventions.
- **Private APIs:** Use `Dynamic()` for runtime method dispatch. Touch objects use `NSClassFromString` + KVC to avoid designated initializer crashes.
- **NSWindow `isReleasedWhenClosed`:** Always set `window.isReleasedWhenClosed = false` for programmatically created windows managed by an `NSWindowController`. The default is `true`, which causes the window to be released on close while `NSWindowController` and `_NSWindowTransformAnimation` still hold references — `objc_release` crashes on a dangling pointer during CA transaction commit. Nib-loaded windows handled by `NSWindowController` get this set automatically, but programmatic windows do not.
- **NSWindow `isReleasedWhenClosed`:** Always set `window.isReleasedWhenClosed = false` for programmatically created windows managed by an `NSWindowController`. The default `true` causes `objc_release` crashes on dangling pointers during CA transaction commit.
### Shell Scripts
- Use `zsh` with `set -euo pipefail`.
- Scripts resolve their own directory via `${0:a:h}` or `$(cd "$(dirname "$0")" && pwd)`.
- Build uses `make build` which handles compilation and entitlement signing.
### Python Scripts
- Firmware patching uses `capstone` (disassembly), `keystone-engine` (assembly), and `pyimg4` (IM4P handling).
- `patchers/kernel.py` uses dynamic pattern finding (string anchors, ADRP+ADD xrefs, BL frequency analysis) — nothing is hardcoded to specific offsets.
- Each patch is logged with offset and before/after state.
- Scripts operate on a VM directory and auto-discover the `*Restore*` subdirectory.
- **Environment:** Use the project venv (`source .venv/bin/activate`). Create with `make setup_venv`. All deps in `requirements.txt`: `capstone`, `keystone-engine`, `pyimg4`.
- Patchers use `capstone` (disassembly), `keystone-engine` (assembly), `pyimg4` (IM4P handling).
- Dynamic pattern finding (string anchors, ADRP+ADD xrefs, BL frequency) — no hardcoded offsets.
- Each patch logged with offset and before/after state.
- Use project venv (`source .venv/bin/activate`). Create with `make setup_venv`.
## Build & Sign
The binary requires private entitlements to use PV=3 virtualization:
- `com.apple.private.virtualization`
- `com.apple.private.virtualization.security-research`
- `com.apple.security.virtualization`
- `com.apple.vm.networking`
- `com.apple.security.get-task-allow`
Always use `make build` — never `swift build` alone, as the unsigned binary will fail at runtime.
## VM Creation (`make vm_new`)
Creates a VM directory with:
- Sparse disk image (default 64 GB)
- SEP storage (512 KB flat file)
- AVPBooter + AVPSEPBooter ROMs (copied from `/System/Library/Frameworks/Virtualization.framework/`)
- machineIdentifier (created on first boot if missing, persisted for stable ECID)
- NVRAM (created/overwritten each boot)
All paths are passed explicitly via CLI (`--rom`, `--disk`, `--nvram`, `--machine-id`, `--sep-storage`, `--sep-rom`). SEP coprocessor is always enabled.
Display is configurable via `--screen-width`, `--screen-height`, `--screen-ppi`, `--screen-scale` (defaults: 1290x2796 @ 460 PPI, scale 3.0).
Override defaults: `make vm_new VM_DIR=myvm DISK_SIZE=32`.
The binary requires private entitlements for PV=3 virtualization. Always use `make build` — never `swift build` alone, as the unsigned binary will fail at runtime.
## Design System
### Intent
**Who:** Security researchers working with Apple firmware and virtual devices. Technical, patient, comfortable in terminals. Likely running alongside GDB, serial consoles, and SSH sessions.
**Task:** Boot, configure, and interact with virtual iPhones for firmware research. Monitor boot state, capture serial output, debug at the firmware level.
**Feel:** Like a research instrument. Precise, informative, honest about internal state. No decoration — every pixel earns its place.
### Palette
- **Background:** Dark neutral (`#1a1a1a` — near-black, low blue to reduce eye strain during long sessions)
- **Surface:** `#242424` (elevated panels), `#2e2e2e` (interactive elements)
- **Text primary:** `#e0e0e0` (high contrast without being pure white)
- **Text secondary:** `#888888` (labels, metadata)
- **Accent — status green:** `#4ade80` (VM running, boot success)
- **Accent — amber:** `#fbbf24` (DFU mode, warnings, in-progress states)
- **Accent — red:** `#f87171` (errors, VM stopped with error)
- **Accent — blue:** `#60a5fa` (informational, links, interactive highlights)
Rationale: Dark surfaces match the terminal-adjacent workflow. Status colors borrow from oscilloscope/JTAG tooling — green for good, amber for attention, red for fault. No brand colors — this is a tool, not a product.
### Typography
- **UI font:** System monospace (SF Mono / Menlo). Everything in this tool is technical — monospace respects the content.
- **Headings:** System sans (SF Pro) semibold, used sparingly for section labels only.
- **Serial/log output:** Monospace, `#e0e0e0` on dark background. No syntax highlighting — raw output, exactly as received.
### Depth
- **Approach:** Flat with subtle 1px borders (`#333333`). No shadows, no blur. Depth through color difference only.
- **Rationale:** Shadows suggest consumer software. Borders suggest instrument panels. This is an instrument.
### Spacing
- **Base unit:** 8px
- **Component padding:** 12px (1.5 units)
- **Section gaps:** 16px (2 units)
- **Window margins:** 16px
### Components
- **Status indicator:** Small circle (8px) with color fill + label. No animation — state changes are instantaneous.
- **VM display:** Full-bleed within its container. No rounded corners on the display itself.
- **Log output:** Scrolling monospace region, bottom-anchored (newest at bottom). No line numbers unless requested.
- **Toolbar (if present):** Icon-only, 32px touch targets, subtle hover state (`#2e2e2e` -> `#3a3a3a`).
---
## JB Kernel Patcher Status (`patches-jb` branch)
Branch is 8 commits ahead of `main`. All changes are **additive** — non-JB code paths are unaffected.
### Diff vs Main
| File | Change | Impact on non-JB |
| ---------------------- | -------------------------------------------------- | ------------------------ |
| `kernel.py` | +1 line: `self.patches = []` reset in `find_all()` | None (harmless init) |
| `cfw.py` | +`patch-launchd-jetsam`, +`inject-dylib` commands | None (new commands only) |
| `kernel_jb.py` | **New file** — 2128 lines | N/A |
| `txm_jb.py` | **New file** — 335 lines | N/A |
| `iboot_jb.py` | **New file** — 105 lines | N/A |
| `fw_patch_jb.py` | **New file** — 115 lines (WIP) | N/A |
| `cfw_install_jb.sh` | **New file** — 214 lines | N/A |
| `cfw_jb_input.tar.zst` | **New file** — JB resources | N/A |
| `Makefile` | +JB targets (`fw_patch_jb`, `cfw_install_jb`) | None (additive) |
| `AGENTS.md` | Documentation updates | N/A |
### Patch Counts
**Base patcher** (`kernel.py`): **25 patches** — verified identical to main.
**JB patcher** (`kernel_jb.py`): **160 patches** from 22 methods:
- **19 of 22 PASSING** — Groups A (sandbox hooks, AMFI, execve), B (string-anchored), C (shellcode)
- **3 FAILING** — see below
### 3 Remaining Failures
| Patch | Upstream Offset | Root Cause | Proposed Strategy |
| ------------------------------------- | ------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `patch_nvram_verify_permission` | NOP BL at `0x1234034` | 332 identical IOKit methods match structural filter; "krn." string leads to wrong function | Find via "IONVRAMController" string → metaclass ctor → PAC disc `#0xcda1` → search `__DATA_CONST` vtable entries (first entry after 3 nulls) with matching PAC disc + BL to memmove |
| `patch_thid_should_crash` | Zero `0x67EB50` | String in `__PRELINK_INFO` plist (no code refs); value already `0x00000000` in PCC kernel | Safe to return True (no-op); or find via `sysctl_oid` struct search in `__DATA` |
| `patch_hook_cred_label_update_execve` | Shellcode at `0xAB17D8` + ops table at `0xA54518` | Needs `_vfs_context_current` (`0xCC5EAC`) and `_vnode_getattr` (`0xCC91C0`) — 0 symbols available | Find via sandbox ops table → original hook func → BL targets by caller count (vfs_context_current = highest, vnode_getattr = near `mov wN, #0x380`) |
### Key Findings (from `research/kernel_jb_patch_notes.md`)
**All offsets in `kernel.py` are file offsets** — `bl_callers` dict, `_is_bl()`, `_disas_at()`, `find_string_refs()` all use file offsets, not VAs.
**IONVRAMController vtable discovery chain**:
```
"IONVRAMController" string @ 0xA2FEB
→ ADRP+ADD refs → metaclass ctor @ 0x125D2C0
→ PAC discriminator: movk x17, #0xcda1, lsl #48
→ instance size: mov w3, #0x88
→ class vtable in __DATA_CONST @ 0x7410B8 (preceded by 3 null entries)
→ vtable[0] = 0x1233E40 = verifyPermission
→ BL to memmove (3114 callers) at +0x1F4 = 0x1234034 ← NOP this
```
**vfs_context_current / vnode_getattr resolution**:
```
sandbox ops table → entry[16] = original hook @ 0x239A0B4
→ disassemble hook → find BL targets:
- _vfs_context_current: BL target with >1000 callers, short function
- _vnode_getattr: BL target near "mov wN, #0x380", moderate callers
```
### Upstream Reference Offsets (iPhone17,3 26.1)
| Symbol | File Offset | Notes |
| -------------------------- | ------------------------ | ------------------- |
| kern_text | `0xA74000`–`0x24B0000` | |
| base_va | `0xFFFFFE0007004000` | |
| verifyPermission func | `0x1233E40` | vtable @ `0x7410B8` |
| verifyPermission patch | `0x1234034` | NOP BL to memmove |
| \_thid_should_crash var | `0x67EB50` | already 0 |
| \_vfs_context_current | `0xCC5EAC` | from BL encoding |
| \_vnode_getattr | `0xCC91C0` | from BL encoding |
| hook_cred_label orig | `0x239A0B4` | from B encoding |
| sandbox ops entry | `0xA54518` | index 16 |
| OSMetaClass::OSMetaClass() | `0x10EA790` | 5236 callers |
| memmove | `0x12CB0D0` | 3114 callers |
- **Audience:** Security researchers. Terminal-adjacent workflow.
- **Feel:** Research instrument — precise, informative, no decoration.
- **Palette:** Dark neutral (`#1a1a1a` bg), status green/amber/red/blue accents.
- **Typography:** System monospace (SF Mono / Menlo) for UI and log output.
- **Depth:** Flat with 1px borders (`#333333`). No shadows.
- **Spacing:** 8px base unit, 12px component padding, 16px section gaps.

View File

@@ -18,11 +18,11 @@ Apple の Virtualization.framework と PCC の研究用 VM インフラを使用
セキュリティバイパスのレベルが異なる3つのパッチバリアントが利用可能です
| バリアント | ブートチェーン | CFW | Make ターゲット |
| -------------------- | :----------: | :-------: | ---------------------------------- |
| **通常版** | 38 パッチ | 10 フェーズ | `fw_patch` + `cfw_install` |
| **開発版** | 47 パッチ | 12 フェーズ | `fw_patch_dev` + `cfw_install_dev` |
| **脱獄版WIP** | 84 パッチ | 14 フェーズ | `fw_patch_jb` + `cfw_install_jb` |
| バリアント | ブートチェーン | CFW | Make ターゲット |
| ----------------- | :------------: | :---------: | ---------------------------------- |
| **通常版** | 38 パッチ | 10 フェーズ | `fw_patch` + `cfw_install` |
| **開発版** | 47 パッチ | 12 フェーズ | `fw_patch_dev` + `cfw_install_dev` |
| **脱獄版WIP** | 84 パッチ | 14 フェーズ | `fw_patch_jb` + `cfw_install_jb` |
詳細なコンポーネントごとの内訳については [research/patch_comparison_all_variants.md](../research/patch_comparison_all_variants.md) を参照してください。

View File

@@ -18,11 +18,11 @@ PCC 리서치 VM 인프라와 Apple의 Virtualization.framework를 사용하여
보안 우회 수준이 다른 3가지 패치 변형을 사용할 수 있습니다:
| 변형 | 부트 체인 | CFW | Make 타겟 |
| ----------------- | :--------: | :-------: | ---------------------------------- |
| **일반** | 38 패치 | 10 페이즈 | `fw_patch` + `cfw_install` |
| **개발** | 47 패치 | 12 페이즈 | `fw_patch_dev` + `cfw_install_dev` |
| **탈옥 (WIP)** | 84 패치 | 14 페이즈 | `fw_patch_jb` + `cfw_install_jb` |
| 변형 | 부트 체인 | CFW | Make 타겟 |
| -------------- | :-------: | :-------: | ---------------------------------- |
| **일반** | 38 패치 | 10 페이즈 | `fw_patch` + `cfw_install` |
| **개발** | 47 패치 | 12 페이즈 | `fw_patch_dev` + `cfw_install_dev` |
| **탈옥 (WIP)** | 84 패치 | 14 페이즈 | `fw_patch_jb` + `cfw_install_jb` |
컴포넌트별 상세 분류는 [research/patch_comparison_all_variants.md](../research/patch_comparison_all_variants.md)를 참조하세요.

View File

@@ -18,11 +18,11 @@
提供三种补丁变体,安全绕过级别逐步递增:
| 变体 | 启动链 | 自定义固件 | Make 目标 |
| ----------------- | :--------: | :-------: | ---------------------------------- |
| **常规版** | 38 个补丁 | 10 个阶段 | `fw_patch` + `cfw_install` |
| **开发版** | 47 个补丁 | 12 个阶段 | `fw_patch_dev` + `cfw_install_dev` |
| **越狱版WIP** | 84 个补丁 | 14 个阶段 | `fw_patch_jb` + `cfw_install_jb` |
| 变体 | 启动链 | 自定义固件 | Make 目标 |
| ----------------- | :-------: | :--------: | ---------------------------------- |
| **常规版** | 38 个补丁 | 10 个阶段 | `fw_patch` + `cfw_install` |
| **开发版** | 47 个补丁 | 12 个阶段 | `fw_patch_dev` + `cfw_install_dev` |
| **越狱版WIP** | 84 个补丁 | 14 个阶段 | `fw_patch_jb` + `cfw_install_jb` |
详见 [research/patch_comparison_all_variants.md](../research/patch_comparison_all_variants.md) 了解各组件的详细分项对比。

View File

@@ -227,13 +227,13 @@ deterministic nonce behavior for restore/research scenarios.
Both variants work with all dynamic patches. Offsets differ but the patcher
finds them by pattern matching:
| Patch | RELEASE offset | RESEARCH offset |
| -------------------------------- | -------------- | --------------- |
| Serial label 1 | `0x084549` | `0x0861C9` |
| Serial label 2 | `0x0845F4` | `0x086274` |
| image4 callback (nop) | `0x009D14` | `0x00A0DC` |
| image4 callback (mov) | `0x009D18` | `0x00A0E0` |
| Skip generate_nonce _(JB patch)_ | `0x00B7B8` | `0x00BC08` |
| Patch | RELEASE offset | RESEARCH offset |
| --------------------------------- | -------------- | --------------- |
| Serial label 1 | `0x084549` | `0x0861C9` |
| Serial label 2 | `0x0845F4` | `0x086274` |
| image4 callback (nop) | `0x009D14` | `0x00A0DC` |
| image4 callback (mov) | `0x009D18` | `0x00A0E0` |
| Skip generate\_nonce _(JB patch)_  | `0x00B7B8`     | `0x00BC08`      |
`fw_patch.py` targets RELEASE, matching the BuildManifest identity
(PCC RELEASE for LLB/iBSS/iBEC). The reference script used RESEARCH_RELEASE.

View File

@@ -42,7 +42,10 @@ Three firmware variants are available, each building on the previous:
### TXM
The three variants use different TXM patchers. Regular uses `txm.py` (1 patch), Dev uses `txm_dev.py` (10 patches), JB uses `txm_jb.py` (12 patches).
TXM patch composition by variant:
- Regular: `txm.py` (1 patch).
- Dev: `txm_dev.py` (10 patches total).
- JB: base `txm.py` (1 patch) + `txm_jb.py` extension (11 patches) = 12 total.
| # | Patch | Purpose | Regular | Dev | JB |
| --- | ------------------------------------------------- | ----------------------------------------------------------- | :-----: | :-: | :-: |

View File

@@ -36,117 +36,117 @@ SSH_HOST="localhost"
SSH_RETRY="${SSH_RETRY:-3}"
SSHPASS_BIN=""
SSH_OPTS=(
-o StrictHostKeyChecking=no
-o UserKnownHostsFile=/dev/null
-o PreferredAuthentications=password
-o ConnectTimeout=30
-q
-o StrictHostKeyChecking=no
-o UserKnownHostsFile=/dev/null
-o PreferredAuthentications=password
-o ConnectTimeout=30
-q
)
# ── Helpers ─────────────────────────────────────────────────────
die() {
echo "[-] $*" >&2
exit 1
echo "[-] $*" >&2
exit 1
}
check_prerequisites() {
local missing=()
command -v sshpass &>/dev/null || missing+=("sshpass")
command -v ldid &>/dev/null || missing+=("ldid (brew install ldid-procursus)")
if ((${#missing[@]} > 0)); then
die "Missing required tools: ${missing[*]}. Run: make setup_tools"
fi
SSHPASS_BIN="$(command -v sshpass)"
local missing=()
command -v sshpass &>/dev/null || missing+=("sshpass")
command -v ldid &>/dev/null || missing+=("ldid (brew install ldid-procursus)")
if ((${#missing[@]} > 0)); then
die "Missing required tools: ${missing[*]}. Run: make setup_tools"
fi
SSHPASS_BIN="$(command -v sshpass)"
}
_sshpass() {
"$SSHPASS_BIN" -p "$SSH_PASS" "$@"
"$SSHPASS_BIN" -p "$SSH_PASS" "$@"
}
_ssh_retry() {
local attempt rc label
label=${2:-cmd}
for ((attempt = 1; attempt <= SSH_RETRY; attempt++)); do
"$@" && return 0
rc=$?
[[ $rc -ne 255 ]] && return $rc # real command failure — don't retry
echo " [${label}] connection lost (attempt $attempt/$SSH_RETRY), retrying in 3s..." >&2
sleep 3
done
return 255
local attempt rc label
label=${2:-cmd}
for ((attempt = 1; attempt <= SSH_RETRY; attempt++)); do
"$@" && return 0
rc=$?
[[ $rc -ne 255 ]] && return $rc # real command failure — don't retry
echo " [${label}] connection lost (attempt $attempt/$SSH_RETRY), retrying in 3s..." >&2
sleep 3
done
return 255
}
ssh_cmd() {
_ssh_retry _sshpass ssh "${SSH_OPTS[@]}" -p "$SSH_PORT" "$SSH_USER@$SSH_HOST" "$@";
ssh_cmd() {
_ssh_retry _sshpass ssh "${SSH_OPTS[@]}" -p "$SSH_PORT" "$SSH_USER@$SSH_HOST" "$@"
}
scp_to() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" -r "$1" "$SSH_USER@$SSH_HOST:$2";
scp_to() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" -r "$1" "$SSH_USER@$SSH_HOST:$2"
}
scp_from() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" "$SSH_USER@$SSH_HOST:$1" "$2";
scp_from() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" "$SSH_USER@$SSH_HOST:$1" "$2"
}
remote_file_exists() {
ssh_cmd "test -f '$1'" 2>/dev/null
ssh_cmd "test -f '$1'" 2>/dev/null
}
ldid_sign() {
local file="$1" bundle_id="${2:-}"
local args=(-S -M "-K$VM_DIR/$CFW_INPUT/signcert.p12")
[[ -n "$bundle_id" ]] && args+=("-I$bundle_id")
ldid "${args[@]}" "$file"
local file="$1" bundle_id="${2:-}"
local args=(-S -M "-K$VM_DIR/$CFW_INPUT/signcert.p12")
[[ -n "$bundle_id" ]] && args+=("-I$bundle_id")
ldid "${args[@]}" "$file"
}
# Detach a DMG mountpoint if currently mounted, ignore errors
safe_detach() {
local mnt="$1"
if mount | grep -q "$mnt"; then
sudo hdiutil detach -force "$mnt" 2>/dev/null || true
fi
local mnt="$1"
if mount | grep -q "$mnt"; then
sudo hdiutil detach -force "$mnt" 2>/dev/null || true
fi
}
# Mount device filesystem, tolerate already-mounted
remote_mount() {
local dev="$1" mnt="$2" opts="${3:-rw}"
ssh_cmd "/sbin/mount_apfs -o $opts $dev $mnt 2>/dev/null || true"
local dev="$1" mnt="$2" opts="${3:-rw}"
ssh_cmd "/sbin/mount_apfs -o $opts $dev $mnt 2>/dev/null || true"
}
# ── Find restore directory ─────────────────────────────────────
find_restore_dir() {
for dir in "$VM_DIR"/iPhone*_Restore; do
[[ -f "$dir/BuildManifest.plist" ]] && echo "$dir" && return
done
die "No restore directory found in $VM_DIR"
for dir in "$VM_DIR"/iPhone*_Restore; do
[[ -f "$dir/BuildManifest.plist" ]] && echo "$dir" && return
done
die "No restore directory found in $VM_DIR"
}
# ── Setup input resources ──────────────────────────────────────
setup_cfw_input() {
[[ -d "$VM_DIR/$CFW_INPUT" ]] && return
local archive
for search_dir in "$SCRIPT_DIR/resources" "$SCRIPT_DIR" "$VM_DIR"; do
archive="$search_dir/$CFW_ARCHIVE"
if [[ -f "$archive" ]]; then
echo " Extracting $CFW_ARCHIVE..."
tar --zstd -xf "$archive" -C "$VM_DIR"
return
fi
done
die "Neither $CFW_INPUT/ nor $CFW_ARCHIVE found"
[[ -d "$VM_DIR/$CFW_INPUT" ]] && return
local archive
for search_dir in "$SCRIPT_DIR/resources" "$SCRIPT_DIR" "$VM_DIR"; do
archive="$search_dir/$CFW_ARCHIVE"
if [[ -f "$archive" ]]; then
echo " Extracting $CFW_ARCHIVE..."
tar --zstd -xf "$archive" -C "$VM_DIR"
return
fi
done
die "Neither $CFW_INPUT/ nor $CFW_ARCHIVE found"
}
# ── Check prerequisites ────────────────────────────────────────
check_prereqs() {
command -v ipsw >/dev/null 2>&1 || die "'ipsw' not found. Install: brew install blacktop/tap/ipsw"
command -v aea >/dev/null 2>&1 || die "'aea' not found (requires macOS 12+)"
command -v python3 >/dev/null 2>&1 || die "python3 not found"
python3 -c "import capstone, keystone" 2>/dev/null ||
die "Missing Python deps. Install: pip install capstone keystone-engine"
command -v ipsw >/dev/null 2>&1 || die "'ipsw' not found. Install: brew install blacktop/tap/ipsw"
command -v aea >/dev/null 2>&1 || die "'aea' not found (requires macOS 12+)"
command -v python3 >/dev/null 2>&1 || die "python3 not found"
python3 -c "import capstone, keystone" 2>/dev/null ||
die "Missing Python deps. Install: pip install capstone keystone-engine"
}
# ── Cleanup trap (unmount DMGs on error) ───────────────────────
cleanup_on_exit() {
safe_detach "$TEMP_DIR/mnt_sysos"
safe_detach "$TEMP_DIR/mnt_appos"
safe_detach "$TEMP_DIR/mnt_sysos"
safe_detach "$TEMP_DIR/mnt_appos"
}
trap cleanup_on_exit EXIT
@@ -187,20 +187,20 @@ MNT_APPOS="$TEMP_DIR/mnt_appos"
# Decrypt SystemOS AEA (cached — skip if already decrypted)
if [[ ! -f "$SYSOS_DMG" ]]; then
echo " Extracting AEA key..."
AEA_KEY=$(ipsw fw aea --key "$RESTORE_DIR/$CRYPTEX_SYSOS")
echo " key: $AEA_KEY"
echo " Decrypting SystemOS..."
aea decrypt -i "$RESTORE_DIR/$CRYPTEX_SYSOS" -o "$SYSOS_DMG" -key-value "$AEA_KEY"
echo " Extracting AEA key..."
AEA_KEY=$(ipsw fw aea --key "$RESTORE_DIR/$CRYPTEX_SYSOS")
echo " key: $AEA_KEY"
echo " Decrypting SystemOS..."
aea decrypt -i "$RESTORE_DIR/$CRYPTEX_SYSOS" -o "$SYSOS_DMG" -key-value "$AEA_KEY"
else
echo " Using cached SystemOS DMG"
echo " Using cached SystemOS DMG"
fi
# Copy AppOS (unencrypted, cached)
if [[ ! -f "$APPOS_DMG" ]]; then
cp "$RESTORE_DIR/$CRYPTEX_APPOS" "$APPOS_DMG"
cp "$RESTORE_DIR/$CRYPTEX_APPOS" "$APPOS_DMG"
else
echo " Using cached AppOS DMG"
echo " Using cached AppOS DMG"
fi
# Detach any leftover mounts from previous runs
@@ -221,23 +221,23 @@ remote_mount /dev/disk1s1 /mnt1
echo " Checking APFS snapshots..."
SNAP_LIST=$(ssh_cmd "snaputil -l /mnt1 2>/dev/null" || true)
if echo "$SNAP_LIST" | grep -q "^orig-fs$"; then
echo " Snapshot 'orig-fs' already exists, skipping rename"
echo " Snapshot 'orig-fs' already exists, skipping rename"
else
UPDATE_SNAP=$(echo "$SNAP_LIST" | grep "^com\.apple\.os\.update-" | head -1)
if [[ -n "$UPDATE_SNAP" ]]; then
echo " Renaming snapshot: $UPDATE_SNAP -> orig-fs"
ssh_cmd "snaputil -n '$UPDATE_SNAP' orig-fs /mnt1"
# Verify rename succeeded
if ! ssh_cmd "snaputil -l /mnt1 2>/dev/null" | grep -q "^orig-fs$"; then
die "Failed to rename snapshot to orig-fs"
UPDATE_SNAP=$(echo "$SNAP_LIST" | grep "^com\.apple\.os\.update-" | head -1)
if [[ -n "$UPDATE_SNAP" ]]; then
echo " Renaming snapshot: $UPDATE_SNAP -> orig-fs"
ssh_cmd "snaputil -n '$UPDATE_SNAP' orig-fs /mnt1"
# Verify rename succeeded
if ! ssh_cmd "snaputil -l /mnt1 2>/dev/null" | grep -q "^orig-fs$"; then
die "Failed to rename snapshot to orig-fs"
fi
echo " Snapshot renamed, remounting..."
ssh_cmd "/sbin/umount /mnt1"
remote_mount /dev/disk1s1 /mnt1
echo " [+] Snapshot renamed to orig-fs"
else
echo " No com.apple.os.update- snapshot found, skipping"
fi
echo " Snapshot renamed, remounting..."
ssh_cmd "/sbin/umount /mnt1"
remote_mount /dev/disk1s1 /mnt1
echo " [+] Snapshot renamed to orig-fs"
else
echo " No com.apple.os.update- snapshot found, skipping"
fi
fi
ssh_cmd "/bin/rm -rf /mnt1/System/Cryptexes/App /mnt1/System/Cryptexes/OS"
@@ -269,8 +269,8 @@ echo "[2/7] Patching seputil..."
# Always patch from .bak (original unpatched binary)
if ! remote_file_exists "/mnt1/usr/libexec/seputil.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/seputil /mnt1/usr/libexec/seputil.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/seputil /mnt1/usr/libexec/seputil.bak"
fi
scp_from "/mnt1/usr/libexec/seputil.bak" "$TEMP_DIR/seputil"
@@ -325,8 +325,8 @@ echo "[5/7] Patching launchd_cache_loader..."
# Always patch from .bak (original unpatched binary)
if ! remote_file_exists "/mnt1/usr/libexec/launchd_cache_loader.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/launchd_cache_loader /mnt1/usr/libexec/launchd_cache_loader.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/launchd_cache_loader /mnt1/usr/libexec/launchd_cache_loader.bak"
fi
scp_from "/mnt1/usr/libexec/launchd_cache_loader.bak" "$TEMP_DIR/launchd_cache_loader"
@@ -343,8 +343,8 @@ echo "[6/7] Patching mobileactivationd..."
# Always patch from .bak (original unpatched binary)
if ! remote_file_exists "/mnt1/usr/libexec/mobileactivationd.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/mobileactivationd /mnt1/usr/libexec/mobileactivationd.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/mobileactivationd /mnt1/usr/libexec/mobileactivationd.bak"
fi
scp_from "/mnt1/usr/libexec/mobileactivationd.bak" "$TEMP_DIR/mobileactivationd"
@@ -363,36 +363,36 @@ echo "[7/7] Installing LaunchDaemons..."
VPHONED_SRC="$SCRIPT_DIR/vphoned"
VPHONED_BIN="$VPHONED_SRC/vphoned"
VPHONED_SRCS=(
"$VPHONED_SRC/vphoned.m"
"$VPHONED_SRC/vphoned_protocol.m"
"$VPHONED_SRC/vphoned_hid.m"
"$VPHONED_SRC/vphoned_devmode.m"
"$VPHONED_SRC/vphoned_location.m"
"$VPHONED_SRC/vphoned_files.m"
"$VPHONED_SRC/vphoned.m"
"$VPHONED_SRC/vphoned_protocol.m"
"$VPHONED_SRC/vphoned_hid.m"
"$VPHONED_SRC/vphoned_devmode.m"
"$VPHONED_SRC/vphoned_location.m"
"$VPHONED_SRC/vphoned_files.m"
)
needs_vphoned_build=0
if [[ ! -f "$VPHONED_BIN" ]]; then
needs_vphoned_build=1
needs_vphoned_build=1
else
for src in "${VPHONED_SRCS[@]}"; do
if [[ "$src" -nt "$VPHONED_BIN" ]]; then
needs_vphoned_build=1
break
fi
done
for src in "${VPHONED_SRCS[@]}"; do
if [[ "$src" -nt "$VPHONED_BIN" ]]; then
needs_vphoned_build=1
break
fi
done
fi
if [[ "$needs_vphoned_build" == "1" ]]; then
echo " Building vphoned for arm64..."
xcrun -sdk iphoneos clang -arch arm64 -Os -fobjc-arc \
-I"$VPHONED_SRC" \
-o "$VPHONED_BIN" "${VPHONED_SRCS[@]}" \
-framework Foundation
echo " Building vphoned for arm64..."
xcrun -sdk iphoneos clang -arch arm64 -Os -fobjc-arc \
-I"$VPHONED_SRC" \
-o "$VPHONED_BIN" "${VPHONED_SRCS[@]}" \
-framework Foundation
fi
cp "$VPHONED_BIN" "$TEMP_DIR/vphoned"
ldid \
-S"$VPHONED_SRC/entitlements.plist" \
-M "-K$VM_DIR/$CFW_INPUT/signcert.p12" \
"$TEMP_DIR/vphoned"
-S"$VPHONED_SRC/entitlements.plist" \
-M "-K$VM_DIR/$CFW_INPUT/signcert.p12" \
"$TEMP_DIR/vphoned"
scp_to "$TEMP_DIR/vphoned" "/mnt1/usr/bin/vphoned"
ssh_cmd "/bin/chmod 0755 /mnt1/usr/bin/vphoned"
# Keep a copy of the signed binary for host-side auto-update
@@ -401,8 +401,8 @@ echo " [+] vphoned installed (signed copy at .vphoned.signed)"
# Send daemon plists (overwrite on re-run)
for plist in bash.plist dropbear.plist trollvnc.plist rpcserver_ios.plist; do
scp_to "$INPUT_DIR/jb/LaunchDaemons/$plist" "/mnt1/System/Library/LaunchDaemons/"
ssh_cmd "/bin/chmod 0644 /mnt1/System/Library/LaunchDaemons/$plist"
scp_to "$INPUT_DIR/jb/LaunchDaemons/$plist" "/mnt1/System/Library/LaunchDaemons/"
ssh_cmd "/bin/chmod 0644 /mnt1/System/Library/LaunchDaemons/$plist"
done
scp_to "$VPHONED_SRC/vphoned.plist" "/mnt1/System/Library/LaunchDaemons/"
ssh_cmd "/bin/chmod 0644 /mnt1/System/Library/LaunchDaemons/vphoned.plist"
@@ -410,8 +410,8 @@ ssh_cmd "/bin/chmod 0644 /mnt1/System/Library/LaunchDaemons/vphoned.plist"
# Always patch launchd.plist from .bak (original)
echo " Patching launchd.plist..."
if ! remote_file_exists "/mnt1/System/Library/xpc/launchd.plist.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/System/Library/xpc/launchd.plist /mnt1/System/Library/xpc/launchd.plist.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/System/Library/xpc/launchd.plist /mnt1/System/Library/xpc/launchd.plist.bak"
fi
scp_from "/mnt1/System/Library/xpc/launchd.plist.bak" "$TEMP_DIR/launchd.plist"
@@ -432,10 +432,10 @@ ssh_cmd "/sbin/umount /mnt3 2>/dev/null || true"
# Only remove temp binaries
echo "[*] Cleaning up temp binaries..."
rm -f "$TEMP_DIR/seputil" \
"$TEMP_DIR/launchd_cache_loader" \
"$TEMP_DIR/mobileactivationd" \
"$TEMP_DIR/vphoned" \
"$TEMP_DIR/launchd.plist"
"$TEMP_DIR/launchd_cache_loader" \
"$TEMP_DIR/mobileactivationd" \
"$TEMP_DIR/vphoned" \
"$TEMP_DIR/launchd.plist"
echo ""
echo "[+] CFW installation complete!"
@@ -443,7 +443,7 @@ echo " Reboot the device for changes to take effect."
echo " After boot, SSH will be available on port 22222 (password: alpine)"
if [[ "$CFW_SKIP_HALT" == "1" ]]; then
echo "[*] CFW_SKIP_HALT=1, skipping halt."
echo "[*] CFW_SKIP_HALT=1, skipping halt."
else
ssh_cmd "/sbin/halt" || true
ssh_cmd "/sbin/halt" || true
fi

View File

@@ -36,137 +36,137 @@ SSH_HOST="localhost"
SSH_RETRY="${SSH_RETRY:-3}"
SSHPASS_BIN=""
SSH_OPTS=(
-o StrictHostKeyChecking=no
-o UserKnownHostsFile=/dev/null
-o PreferredAuthentications=password
-o ConnectTimeout=30
-q
-o StrictHostKeyChecking=no
-o UserKnownHostsFile=/dev/null
-o PreferredAuthentications=password
-o ConnectTimeout=30
-q
)
# ── Helpers ─────────────────────────────────────────────────────
die() {
echo "[-] $*" >&2
exit 1
echo "[-] $*" >&2
exit 1
}
check_prerequisites() {
local missing=()
command -v sshpass &>/dev/null || missing+=("sshpass")
command -v ldid &>/dev/null || missing+=("ldid (brew install ldid-procursus)")
if ((${#missing[@]} > 0)); then
die "Missing required tools: ${missing[*]}. Run: make setup_tools"
fi
SSHPASS_BIN="$(command -v sshpass)"
local missing=()
command -v sshpass &>/dev/null || missing+=("sshpass")
command -v ldid &>/dev/null || missing+=("ldid (brew install ldid-procursus)")
if ((${#missing[@]} > 0)); then
die "Missing required tools: ${missing[*]}. Run: make setup_tools"
fi
SSHPASS_BIN="$(command -v sshpass)"
}
_sshpass() {
"$SSHPASS_BIN" -p "$SSH_PASS" "$@"
"$SSHPASS_BIN" -p "$SSH_PASS" "$@"
}
_ssh_retry() {
local attempt rc label
label=${2:-cmd}
for ((attempt = 1; attempt <= SSH_RETRY; attempt++)); do
"$@" && return 0
rc=$?
[[ $rc -ne 255 ]] && return $rc # real command failure — don't retry
echo " [${label}] connection lost (attempt $attempt/$SSH_RETRY), retrying in 3s..." >&2
sleep 3
done
return 255
local attempt rc label
label=${2:-cmd}
for ((attempt = 1; attempt <= SSH_RETRY; attempt++)); do
"$@" && return 0
rc=$?
[[ $rc -ne 255 ]] && return $rc # real command failure — don't retry
echo " [${label}] connection lost (attempt $attempt/$SSH_RETRY), retrying in 3s..." >&2
sleep 3
done
return 255
}
ssh_cmd() {
_ssh_retry _sshpass ssh "${SSH_OPTS[@]}" -p "$SSH_PORT" "$SSH_USER@$SSH_HOST" "$@";
ssh_cmd() {
_ssh_retry _sshpass ssh "${SSH_OPTS[@]}" -p "$SSH_PORT" "$SSH_USER@$SSH_HOST" "$@"
}
scp_to() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" -r "$1" "$SSH_USER@$SSH_HOST:$2";
scp_to() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" -r "$1" "$SSH_USER@$SSH_HOST:$2"
}
scp_from() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" "$SSH_USER@$SSH_HOST:$1" "$2";
scp_from() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" "$SSH_USER@$SSH_HOST:$1" "$2"
}
remote_file_exists() {
ssh_cmd "test -f '$1'" 2>/dev/null
ssh_cmd "test -f '$1'" 2>/dev/null
}
ldid_sign() {
local file="$1" bundle_id="${2:-}"
local args=(-S -M "-K$VM_DIR/$CFW_INPUT/signcert.p12")
[[ -n "$bundle_id" ]] && args+=("-I$bundle_id")
ldid "${args[@]}" "$file"
local file="$1" bundle_id="${2:-}"
local args=(-S -M "-K$VM_DIR/$CFW_INPUT/signcert.p12")
[[ -n "$bundle_id" ]] && args+=("-I$bundle_id")
ldid "${args[@]}" "$file"
}
# Detach a DMG mountpoint if currently mounted, ignore errors
safe_detach() {
local mnt="$1"
if mount | grep -q "$mnt"; then
sudo hdiutil detach -force "$mnt" 2>/dev/null || true
fi
local mnt="$1"
if mount | grep -q "$mnt"; then
sudo hdiutil detach -force "$mnt" 2>/dev/null || true
fi
}
# Mount device filesystem, tolerate already-mounted
remote_mount() {
local dev="$1" mnt="$2" opts="${3:-rw}"
ssh_cmd "/sbin/mount_apfs -o $opts $dev $mnt 2>/dev/null || true"
local dev="$1" mnt="$2" opts="${3:-rw}"
ssh_cmd "/sbin/mount_apfs -o $opts $dev $mnt 2>/dev/null || true"
}
# ── Find restore directory ─────────────────────────────────────
find_restore_dir() {
for dir in "$VM_DIR"/iPhone*_Restore; do
[[ -f "$dir/BuildManifest.plist" ]] && echo "$dir" && return
done
die "No restore directory found in $VM_DIR"
for dir in "$VM_DIR"/iPhone*_Restore; do
[[ -f "$dir/BuildManifest.plist" ]] && echo "$dir" && return
done
die "No restore directory found in $VM_DIR"
}
# ── Setup input resources ──────────────────────────────────────
setup_cfw_input() {
[[ -d "$VM_DIR/$CFW_INPUT" ]] && return
local archive
for search_dir in "$SCRIPT_DIR/resources" "$SCRIPT_DIR" "$VM_DIR"; do
archive="$search_dir/$CFW_ARCHIVE"
if [[ -f "$archive" ]]; then
echo " Extracting $CFW_ARCHIVE..."
tar --zstd -xf "$archive" -C "$VM_DIR"
return
fi
done
die "Neither $CFW_INPUT/ nor $CFW_ARCHIVE found"
[[ -d "$VM_DIR/$CFW_INPUT" ]] && return
local archive
for search_dir in "$SCRIPT_DIR/resources" "$SCRIPT_DIR" "$VM_DIR"; do
archive="$search_dir/$CFW_ARCHIVE"
if [[ -f "$archive" ]]; then
echo " Extracting $CFW_ARCHIVE..."
tar --zstd -xf "$archive" -C "$VM_DIR"
return
fi
done
die "Neither $CFW_INPUT/ nor $CFW_ARCHIVE found"
}
# ── Apply dev overlay (replace rpcserver_ios in iosbinpack64) ──
apply_dev_overlay() {
local dev_bin
for search_dir in "$SCRIPT_DIR/resources/cfw_dev" "$SCRIPT_DIR/cfw_dev"; do
dev_bin="$search_dir/rpcserver_ios"
if [[ -f "$dev_bin" ]]; then
echo " Applying dev overlay (rpcserver_ios)..."
local iosbinpack="$VM_DIR/$CFW_INPUT/jb/iosbinpack64.tar"
local tmpdir="$VM_DIR/.iosbinpack_tmp"
mkdir -p "$tmpdir"
tar -xf "$iosbinpack" -C "$tmpdir"
cp "$dev_bin" "$tmpdir/iosbinpack64/usr/local/bin/rpcserver_ios"
(cd "$tmpdir" && tar -cf "$iosbinpack" iosbinpack64)
rm -rf "$tmpdir"
return
fi
done
die "Dev overlay not found (cfw_dev/rpcserver_ios)"
local dev_bin
for search_dir in "$SCRIPT_DIR/resources/cfw_dev" "$SCRIPT_DIR/cfw_dev"; do
dev_bin="$search_dir/rpcserver_ios"
if [[ -f "$dev_bin" ]]; then
echo " Applying dev overlay (rpcserver_ios)..."
local iosbinpack="$VM_DIR/$CFW_INPUT/jb/iosbinpack64.tar"
local tmpdir="$VM_DIR/.iosbinpack_tmp"
mkdir -p "$tmpdir"
tar -xf "$iosbinpack" -C "$tmpdir"
cp "$dev_bin" "$tmpdir/iosbinpack64/usr/local/bin/rpcserver_ios"
(cd "$tmpdir" && tar -cf "$iosbinpack" iosbinpack64)
rm -rf "$tmpdir"
return
fi
done
die "Dev overlay not found (cfw_dev/rpcserver_ios)"
}
# ── Check prerequisites ────────────────────────────────────────
check_prereqs() {
command -v ipsw >/dev/null 2>&1 || die "'ipsw' not found. Install: brew install blacktop/tap/ipsw"
command -v aea >/dev/null 2>&1 || die "'aea' not found (requires macOS 12+)"
command -v python3 >/dev/null 2>&1 || die "python3 not found"
python3 -c "import capstone, keystone" 2>/dev/null ||
die "Missing Python deps. Install: pip install capstone keystone-engine"
command -v ipsw >/dev/null 2>&1 || die "'ipsw' not found. Install: brew install blacktop/tap/ipsw"
command -v aea >/dev/null 2>&1 || die "'aea' not found (requires macOS 12+)"
command -v python3 >/dev/null 2>&1 || die "python3 not found"
python3 -c "import capstone, keystone" 2>/dev/null ||
die "Missing Python deps. Install: pip install capstone keystone-engine"
}
# ── Cleanup trap (unmount DMGs on error) ───────────────────────
cleanup_on_exit() {
safe_detach "$TEMP_DIR/mnt_sysos"
safe_detach "$TEMP_DIR/mnt_appos"
safe_detach "$TEMP_DIR/mnt_sysos"
safe_detach "$TEMP_DIR/mnt_appos"
}
trap cleanup_on_exit EXIT
@@ -208,20 +208,20 @@ MNT_APPOS="$TEMP_DIR/mnt_appos"
# Decrypt SystemOS AEA (cached — skip if already decrypted)
if [[ ! -f "$SYSOS_DMG" ]]; then
echo " Extracting AEA key..."
AEA_KEY=$(ipsw fw aea --key "$RESTORE_DIR/$CRYPTEX_SYSOS")
echo " key: $AEA_KEY"
echo " Decrypting SystemOS..."
aea decrypt -i "$RESTORE_DIR/$CRYPTEX_SYSOS" -o "$SYSOS_DMG" -key-value "$AEA_KEY"
echo " Extracting AEA key..."
AEA_KEY=$(ipsw fw aea --key "$RESTORE_DIR/$CRYPTEX_SYSOS")
echo " key: $AEA_KEY"
echo " Decrypting SystemOS..."
aea decrypt -i "$RESTORE_DIR/$CRYPTEX_SYSOS" -o "$SYSOS_DMG" -key-value "$AEA_KEY"
else
echo " Using cached SystemOS DMG"
echo " Using cached SystemOS DMG"
fi
# Copy AppOS (unencrypted, cached)
if [[ ! -f "$APPOS_DMG" ]]; then
cp "$RESTORE_DIR/$CRYPTEX_APPOS" "$APPOS_DMG"
cp "$RESTORE_DIR/$CRYPTEX_APPOS" "$APPOS_DMG"
else
echo " Using cached AppOS DMG"
echo " Using cached AppOS DMG"
fi
# Detach any leftover mounts from previous runs
@@ -243,8 +243,8 @@ echo ""
echo " Patching launchd (jetsam guard)..."
if ! remote_file_exists "/mnt1/sbin/launchd.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/sbin/launchd /mnt1/sbin/launchd.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/sbin/launchd /mnt1/sbin/launchd.bak"
fi
scp_from "/mnt1/sbin/launchd.bak" "$TEMP_DIR/launchd"
@@ -260,23 +260,23 @@ echo " [+] launchd patched"
echo " Checking APFS snapshots..."
SNAP_LIST=$(ssh_cmd "snaputil -l /mnt1 2>/dev/null" || true)
if echo "$SNAP_LIST" | grep -q "^orig-fs$"; then
echo " Snapshot 'orig-fs' already exists, skipping rename"
echo " Snapshot 'orig-fs' already exists, skipping rename"
else
UPDATE_SNAP=$(echo "$SNAP_LIST" | grep "^com\.apple\.os\.update-" | head -1)
if [[ -n "$UPDATE_SNAP" ]]; then
echo " Renaming snapshot: $UPDATE_SNAP -> orig-fs"
ssh_cmd "snaputil -n '$UPDATE_SNAP' orig-fs /mnt1"
# Verify rename succeeded
if ! ssh_cmd "snaputil -l /mnt1 2>/dev/null" | grep -q "^orig-fs$"; then
die "Failed to rename snapshot to orig-fs"
UPDATE_SNAP=$(echo "$SNAP_LIST" | grep "^com\.apple\.os\.update-" | head -1)
if [[ -n "$UPDATE_SNAP" ]]; then
echo " Renaming snapshot: $UPDATE_SNAP -> orig-fs"
ssh_cmd "snaputil -n '$UPDATE_SNAP' orig-fs /mnt1"
# Verify rename succeeded
if ! ssh_cmd "snaputil -l /mnt1 2>/dev/null" | grep -q "^orig-fs$"; then
die "Failed to rename snapshot to orig-fs"
fi
echo " Snapshot renamed, remounting..."
ssh_cmd "/sbin/umount /mnt1"
remote_mount /dev/disk1s1 /mnt1
echo " [+] Snapshot renamed to orig-fs"
else
echo " No com.apple.os.update- snapshot found, skipping"
fi
echo " Snapshot renamed, remounting..."
ssh_cmd "/sbin/umount /mnt1"
remote_mount /dev/disk1s1 /mnt1
echo " [+] Snapshot renamed to orig-fs"
else
echo " No com.apple.os.update- snapshot found, skipping"
fi
fi
ssh_cmd "/bin/rm -rf /mnt1/System/Cryptexes/App /mnt1/System/Cryptexes/OS"
@@ -308,8 +308,8 @@ echo "[2/7] Patching seputil..."
# Always patch from .bak (original unpatched binary)
if ! remote_file_exists "/mnt1/usr/libexec/seputil.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/seputil /mnt1/usr/libexec/seputil.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/seputil /mnt1/usr/libexec/seputil.bak"
fi
scp_from "/mnt1/usr/libexec/seputil.bak" "$TEMP_DIR/seputil"
@@ -364,8 +364,8 @@ echo "[5/7] Patching launchd_cache_loader..."
# Always patch from .bak (original unpatched binary)
if ! remote_file_exists "/mnt1/usr/libexec/launchd_cache_loader.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/launchd_cache_loader /mnt1/usr/libexec/launchd_cache_loader.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/launchd_cache_loader /mnt1/usr/libexec/launchd_cache_loader.bak"
fi
scp_from "/mnt1/usr/libexec/launchd_cache_loader.bak" "$TEMP_DIR/launchd_cache_loader"
@@ -382,8 +382,8 @@ echo "[6/7] Patching mobileactivationd..."
# Always patch from .bak (original unpatched binary)
if ! remote_file_exists "/mnt1/usr/libexec/mobileactivationd.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/mobileactivationd /mnt1/usr/libexec/mobileactivationd.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/usr/libexec/mobileactivationd /mnt1/usr/libexec/mobileactivationd.bak"
fi
scp_from "/mnt1/usr/libexec/mobileactivationd.bak" "$TEMP_DIR/mobileactivationd"
@@ -402,36 +402,36 @@ echo "[7/7] Installing LaunchDaemons..."
VPHONED_SRC="$SCRIPT_DIR/vphoned"
VPHONED_BIN="$VPHONED_SRC/vphoned"
VPHONED_SRCS=(
"$VPHONED_SRC/vphoned.m"
"$VPHONED_SRC/vphoned_protocol.m"
"$VPHONED_SRC/vphoned_hid.m"
"$VPHONED_SRC/vphoned_devmode.m"
"$VPHONED_SRC/vphoned_location.m"
"$VPHONED_SRC/vphoned_files.m"
"$VPHONED_SRC/vphoned.m"
"$VPHONED_SRC/vphoned_protocol.m"
"$VPHONED_SRC/vphoned_hid.m"
"$VPHONED_SRC/vphoned_devmode.m"
"$VPHONED_SRC/vphoned_location.m"
"$VPHONED_SRC/vphoned_files.m"
)
needs_vphoned_build=0
if [[ ! -f "$VPHONED_BIN" ]]; then
needs_vphoned_build=1
needs_vphoned_build=1
else
for src in "${VPHONED_SRCS[@]}"; do
if [[ "$src" -nt "$VPHONED_BIN" ]]; then
needs_vphoned_build=1
break
fi
done
for src in "${VPHONED_SRCS[@]}"; do
if [[ "$src" -nt "$VPHONED_BIN" ]]; then
needs_vphoned_build=1
break
fi
done
fi
if [[ "$needs_vphoned_build" == "1" ]]; then
echo " Building vphoned for arm64..."
xcrun -sdk iphoneos clang -arch arm64 -Os -fobjc-arc \
-I"$VPHONED_SRC" \
-o "$VPHONED_BIN" "${VPHONED_SRCS[@]}" \
-framework Foundation
echo " Building vphoned for arm64..."
xcrun -sdk iphoneos clang -arch arm64 -Os -fobjc-arc \
-I"$VPHONED_SRC" \
-o "$VPHONED_BIN" "${VPHONED_SRCS[@]}" \
-framework Foundation
fi
cp "$VPHONED_BIN" "$TEMP_DIR/vphoned"
ldid \
-S"$VPHONED_SRC/entitlements.plist" \
-M "-K$VM_DIR/$CFW_INPUT/signcert.p12" \
"$TEMP_DIR/vphoned"
-S"$VPHONED_SRC/entitlements.plist" \
-M "-K$VM_DIR/$CFW_INPUT/signcert.p12" \
"$TEMP_DIR/vphoned"
scp_to "$TEMP_DIR/vphoned" "/mnt1/usr/bin/vphoned"
ssh_cmd "/bin/chmod 0755 /mnt1/usr/bin/vphoned"
# Keep a copy of the signed binary for host-side auto-update
@@ -440,8 +440,8 @@ echo " [+] vphoned installed (signed copy at .vphoned.signed)"
# Send daemon plists (overwrite on re-run)
for plist in bash.plist dropbear.plist trollvnc.plist rpcserver_ios.plist; do
scp_to "$INPUT_DIR/jb/LaunchDaemons/$plist" "/mnt1/System/Library/LaunchDaemons/"
ssh_cmd "/bin/chmod 0644 /mnt1/System/Library/LaunchDaemons/$plist"
scp_to "$INPUT_DIR/jb/LaunchDaemons/$plist" "/mnt1/System/Library/LaunchDaemons/"
ssh_cmd "/bin/chmod 0644 /mnt1/System/Library/LaunchDaemons/$plist"
done
scp_to "$VPHONED_SRC/vphoned.plist" "/mnt1/System/Library/LaunchDaemons/"
ssh_cmd "/bin/chmod 0644 /mnt1/System/Library/LaunchDaemons/vphoned.plist"
@@ -449,8 +449,8 @@ ssh_cmd "/bin/chmod 0644 /mnt1/System/Library/LaunchDaemons/vphoned.plist"
# Always patch launchd.plist from .bak (original)
echo " Patching launchd.plist..."
if ! remote_file_exists "/mnt1/System/Library/xpc/launchd.plist.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/System/Library/xpc/launchd.plist /mnt1/System/Library/xpc/launchd.plist.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/System/Library/xpc/launchd.plist /mnt1/System/Library/xpc/launchd.plist.bak"
fi
scp_from "/mnt1/System/Library/xpc/launchd.plist.bak" "$TEMP_DIR/launchd.plist"
@@ -471,10 +471,10 @@ ssh_cmd "/sbin/umount /mnt3 2>/dev/null || true"
# Only remove temp binaries
echo "[*] Cleaning up temp binaries..."
rm -f "$TEMP_DIR/seputil" \
"$TEMP_DIR/launchd_cache_loader" \
"$TEMP_DIR/mobileactivationd" \
"$TEMP_DIR/vphoned" \
"$TEMP_DIR/launchd.plist"
"$TEMP_DIR/launchd_cache_loader" \
"$TEMP_DIR/mobileactivationd" \
"$TEMP_DIR/vphoned" \
"$TEMP_DIR/launchd.plist"
echo ""
echo "[+] CFW installation complete!"
@@ -482,7 +482,7 @@ echo " Reboot the device for changes to take effect."
echo " After boot, SSH will be available on port 22222 (password: alpine)"
if [[ "$CFW_SKIP_HALT" == "1" ]]; then
echo "[*] CFW_SKIP_HALT=1, skipping halt."
echo "[*] CFW_SKIP_HALT=1, skipping halt."
else
ssh_cmd "/sbin/halt" || true
ssh_cmd "/sbin/halt" || true
fi

View File

@@ -42,89 +42,89 @@ SSH_HOST="localhost"
SSH_RETRY="${SSH_RETRY:-3}"
SSHPASS_BIN=""
SSH_OPTS=(
-o StrictHostKeyChecking=no
-o UserKnownHostsFile=/dev/null
-o PreferredAuthentications=password
-o ConnectTimeout=30
-q
-o StrictHostKeyChecking=no
-o UserKnownHostsFile=/dev/null
-o PreferredAuthentications=password
-o ConnectTimeout=30
-q
)
# ── Helpers ─────────────────────────────────────────────────────
die() {
echo "[-] $*" >&2
exit 1
echo "[-] $*" >&2
exit 1
}
check_prerequisites() {
local missing=()
command -v sshpass &>/dev/null || missing+=("sshpass")
command -v ldid &>/dev/null || missing+=("ldid (brew install ldid-procursus)")
if ((${#missing[@]} > 0)); then
die "Missing required tools: ${missing[*]}. Run: make setup_tools"
fi
SSHPASS_BIN="$(command -v sshpass)"
local missing=()
command -v sshpass &>/dev/null || missing+=("sshpass")
command -v ldid &>/dev/null || missing+=("ldid (brew install ldid-procursus)")
if ((${#missing[@]} > 0)); then
die "Missing required tools: ${missing[*]}. Run: make setup_tools"
fi
SSHPASS_BIN="$(command -v sshpass)"
}
_sshpass() {
"$SSHPASS_BIN" -p "$SSH_PASS" "$@"
"$SSHPASS_BIN" -p "$SSH_PASS" "$@"
}
_ssh_retry() {
local attempt rc label
label=${2-cmd}
for ((attempt = 1; attempt <= SSH_RETRY; attempt++)); do
"$@" && return 0
rc=$?
[[ $rc -ne 255 ]] && return $rc # real command failure — don't retry
echo " [${label}] connection lost (attempt $attempt/$SSH_RETRY), retrying in 3s..." >&2
sleep 3
done
return 255
local attempt rc label
label=${2:-cmd}
for ((attempt = 1; attempt <= SSH_RETRY; attempt++)); do
"$@" && return 0
rc=$?
[[ $rc -ne 255 ]] && return $rc # real command failure — don't retry
echo " [${label}] connection lost (attempt $attempt/$SSH_RETRY), retrying in 3s..." >&2
sleep 3
done
return 255
}
ssh_cmd() {
_ssh_retry _sshpass ssh "${SSH_OPTS[@]}" -p "$SSH_PORT" "$SSH_USER@$SSH_HOST" "$@";
ssh_cmd() {
_ssh_retry _sshpass ssh "${SSH_OPTS[@]}" -p "$SSH_PORT" "$SSH_USER@$SSH_HOST" "$@"
}
scp_to() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" -r "$1" "$SSH_USER@$SSH_HOST:$2";
scp_to() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" -r "$1" "$SSH_USER@$SSH_HOST:$2"
}
scp_from() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" "$SSH_USER@$SSH_HOST:$1" "$2";
scp_from() {
_ssh_retry _sshpass scp -q "${SSH_OPTS[@]}" -P "$SSH_PORT" "$SSH_USER@$SSH_HOST:$1" "$2"
}
remote_file_exists() {
ssh_cmd "test -f '$1'" 2>/dev/null
ssh_cmd "test -f '$1'" 2>/dev/null
}
ldid_sign() {
local file="$1" bundle_id="${2:-}"
local args=(-S -M "-K$VM_DIR/$CFW_INPUT/signcert.p12")
[[ -n "$bundle_id" ]] && args+=("-I$bundle_id")
ldid "${args[@]}" "$file"
local file="$1" bundle_id="${2:-}"
local args=(-S -M "-K$VM_DIR/$CFW_INPUT/signcert.p12")
[[ -n "$bundle_id" ]] && args+=("-I$bundle_id")
ldid "${args[@]}" "$file"
}
remote_mount() {
local dev="$1" mnt="$2" opts="${3:-rw}"
ssh_cmd "/sbin/mount_apfs -o $opts $dev $mnt 2>/dev/null || true"
local dev="$1" mnt="$2" opts="${3:-rw}"
ssh_cmd "/sbin/mount_apfs -o $opts $dev $mnt 2>/dev/null || true"
}
get_boot_manifest_hash() {
ssh_cmd "/bin/ls /mnt5 2>/dev/null" | awk 'length($0)==96{print; exit}'
ssh_cmd "/bin/ls /mnt5 2>/dev/null" | awk 'length($0)==96{print; exit}'
}
# ── Setup JB input resources ──────────────────────────────────
setup_cfw_jb_input() {
[[ -d "$VM_DIR/$CFW_JB_INPUT" ]] && return
local archive
for search_dir in "$SCRIPT_DIR/resources" "$SCRIPT_DIR" "$VM_DIR"; do
archive="$search_dir/$CFW_JB_ARCHIVE"
if [[ -f "$archive" ]]; then
echo " Extracting $CFW_JB_ARCHIVE..."
tar --zstd -xf "$archive" -C "$VM_DIR"
return
fi
done
die "JB mode: neither $CFW_JB_INPUT/ nor $CFW_JB_ARCHIVE found"
[[ -d "$VM_DIR/$CFW_JB_INPUT" ]] && return
local archive
for search_dir in "$SCRIPT_DIR/resources" "$SCRIPT_DIR" "$VM_DIR"; do
archive="$search_dir/$CFW_JB_ARCHIVE"
if [[ -f "$archive" ]]; then
echo " Extracting $CFW_JB_ARCHIVE..."
tar --zstd -xf "$archive" -C "$VM_DIR"
return
fi
done
die "JB mode: neither $CFW_JB_INPUT/ nor $CFW_JB_ARCHIVE found"
}
# ── Check JB prerequisites ────────────────────────────────────
@@ -146,16 +146,16 @@ echo ""
echo "[JB-1] Patching launchd (jetsam guard + hook injection)..."
if ! remote_file_exists "/mnt1/sbin/launchd.bak"; then
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/sbin/launchd /mnt1/sbin/launchd.bak"
echo " Creating backup..."
ssh_cmd "/bin/cp /mnt1/sbin/launchd /mnt1/sbin/launchd.bak"
fi
scp_from "/mnt1/sbin/launchd.bak" "$TEMP_DIR/launchd"
# Inject launchdhook.dylib load command (idempotent — skips if already present)
if [[ -d "$JB_INPUT_DIR/basebin" ]]; then
echo " Injecting LC_LOAD_DYLIB for /cores/launchdhook.dylib..."
python3 "$SCRIPT_DIR/patchers/cfw.py" inject-dylib "$TEMP_DIR/launchd" "/cores/launchdhook.dylib"
echo " Injecting LC_LOAD_DYLIB for /cores/launchdhook.dylib..."
python3 "$SCRIPT_DIR/patchers/cfw.py" inject-dylib "$TEMP_DIR/launchd" "/cores/launchdhook.dylib"
fi
python3 "$SCRIPT_DIR/patchers/cfw.py" patch-launchd-jetsam "$TEMP_DIR/launchd"
@@ -183,7 +183,7 @@ zstd -d -f "$BOOTSTRAP_ZST" -o "$BOOTSTRAP_TAR"
scp_to "$BOOTSTRAP_TAR" "/mnt5/$BOOT_HASH/bootstrap-iphoneos-arm64.tar"
if [[ -f "$SILEO_DEB" ]]; then
scp_to "$SILEO_DEB" "/mnt5/$BOOT_HASH/org.coolstar.sileo_2.5.1_iphoneos-arm64.deb"
scp_to "$SILEO_DEB" "/mnt5/$BOOT_HASH/org.coolstar.sileo_2.5.1_iphoneos-arm64.deb"
fi
ssh_cmd "/bin/mkdir -p /mnt5/$BOOT_HASH/jb-vphone"
@@ -203,23 +203,23 @@ echo " [+] procursus bootstrap installed"
# ═══════════ JB-3 DEPLOY BASEBIN HOOKS ═════════════════════════
BASEBIN_DIR="$JB_INPUT_DIR/basebin"
if [[ -d "$BASEBIN_DIR" ]]; then
echo ""
echo "[JB-3] Deploying BaseBin hooks to /cores/..."
echo ""
echo "[JB-3] Deploying BaseBin hooks to /cores/..."
ssh_cmd "/bin/mkdir -p /mnt1/cores"
ssh_cmd "/bin/chmod 0755 /mnt1/cores"
ssh_cmd "/bin/mkdir -p /mnt1/cores"
ssh_cmd "/bin/chmod 0755 /mnt1/cores"
for dylib in "$BASEBIN_DIR"/*.dylib; do
[[ -f "$dylib" ]] || continue
dylib_name="$(basename "$dylib")"
echo " Installing $dylib_name..."
# Re-sign with our certificate before deploying
ldid_sign "$dylib"
scp_to "$dylib" "/mnt1/cores/$dylib_name"
ssh_cmd "/bin/chmod 0755 /mnt1/cores/$dylib_name"
done
for dylib in "$BASEBIN_DIR"/*.dylib; do
[[ -f "$dylib" ]] || continue
dylib_name="$(basename "$dylib")"
echo " Installing $dylib_name..."
# Re-sign with our certificate before deploying
ldid_sign "$dylib"
scp_to "$dylib" "/mnt1/cores/$dylib_name"
ssh_cmd "/bin/chmod 0755 /mnt1/cores/$dylib_name"
done
echo " [+] BaseBin hooks deployed"
echo " [+] BaseBin hooks deployed"
fi
# ═══════════ CLEANUP ═════════════════════════════════════════
@@ -231,7 +231,7 @@ ssh_cmd "/sbin/umount /mnt5 2>/dev/null || true"
echo "[*] Cleaning up temp binaries..."
rm -f "$TEMP_DIR/launchd" \
"$TEMP_DIR/bootstrap-iphoneos-arm64.tar"
"$TEMP_DIR/bootstrap-iphoneos-arm64.tar"
echo ""
echo "[+] CFW + JB installation complete!"

View File

@@ -69,7 +69,6 @@ CLOUDOS_CACHE="${IPSW_DIR}/${CLOUDOS_DIR}"
extract() {
local zip="$1" cache="$2" out="$3"
# Extract to cache if not already done
if [[ -d "$cache" && -n "$(ls -A "$cache" 2>/dev/null)" ]]; then
echo "==> Cached: ${cache##*/}"
else
@@ -91,17 +90,17 @@ extract "$CLOUDOS_IPSW_PATH" "$CLOUDOS_CACHE" "$CLOUDOS_DIR"
# ── Merge cloudOS firmware into iPhone restore directory ──────────────
echo "==> Importing cloudOS firmware components ..."
cp ${CLOUDOS_DIR}/kernelcache.* "$IPHONE_DIR"/
cp "${CLOUDOS_DIR}"/kernelcache.* "$IPHONE_DIR"/
for sub in agx all_flash ane dfu pmp; do
cp ${CLOUDOS_DIR}/Firmware/${sub}/* "$IPHONE_DIR/Firmware/${sub}"/
cp "${CLOUDOS_DIR}/Firmware/${sub}"/* "$IPHONE_DIR/Firmware/${sub}"/
done
cp ${CLOUDOS_DIR}/Firmware/*.im4p "$IPHONE_DIR/Firmware"/
cp "${CLOUDOS_DIR}"/Firmware/*.im4p "$IPHONE_DIR/Firmware"/
# CloudOS ramdisk DMGs and trustcaches (RestoreRamDisk / RestoreTrustCache)
cp -n ${CLOUDOS_DIR}/*.dmg "$IPHONE_DIR"/ 2>/dev/null || true
cp -n ${CLOUDOS_DIR}/Firmware/*.dmg.trustcache "$IPHONE_DIR/Firmware"/ 2>/dev/null || true
cp -n "${CLOUDOS_DIR}"/*.dmg "$IPHONE_DIR"/ 2>/dev/null || true
cp -n "${CLOUDOS_DIR}"/Firmware/*.dmg.trustcache "$IPHONE_DIR/Firmware"/ 2>/dev/null || true
# ── Preserve original iPhone BuildManifest (cfw_install.sh reads Cryptex paths) ──
cp "$IPHONE_DIR/BuildManifest.plist" "$IPHONE_DIR/BuildManifest-iPhone.plist"

File diff suppressed because it is too large Load Diff

208
scripts/patchers/cfw_asm.py Normal file
View File

@@ -0,0 +1,208 @@
"""Shared helpers for CFW patch modules."""
#!/usr/bin/env python3
"""
patch_cfw.py — Dynamic binary patching for CFW installation on vphone600.
Uses capstone for disassembly-based anchoring and keystone for instruction
assembly, producing reliable, upgrade-proof patches.
Called by install_cfw.sh during CFW installation.
Commands:
cryptex-paths <BuildManifest.plist>
Print SystemOS and AppOS DMG paths from BuildManifest.
patch-seputil <binary>
Patch seputil gigalocker UUID to "AA".
patch-launchd-cache-loader <binary>
NOP the cache validation check in launchd_cache_loader.
patch-mobileactivationd <binary>
Patch -[DeviceType should_hactivate] to always return true.
patch-launchd-jetsam <binary>
Patch launchd jetsam panic guard to avoid initproc crash loop.
inject-daemons <launchd.plist> <daemon_dir>
Inject bash/dropbear/trollvnc into launchd.plist.
inject-dylib <binary> <dylib_path>
Inject LC_LOAD_DYLIB into Mach-O binary (thin or universal).
Equivalent to: optool install -c load -p <dylib_path> -t <binary>
Dependencies:
pip install capstone keystone-engine
"""
import os
import plistlib
import struct
import subprocess
import sys
from capstone import Cs, CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN
from capstone.arm64_const import ARM64_OP_IMM
from keystone import Ks, KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN as KS_MODE_LE
# ══════════════════════════════════════════════════════════════════
# ARM64 assembler / disassembler
# ══════════════════════════════════════════════════════════════════
# Module-wide singleton engines: one capstone disassembler (detail mode on,
# required so instruction operands are populated for xref matching) and one
# keystone assembler, both configured for little-endian ARM64.
_cs = Cs(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN)
_cs.detail = True
_ks = Ks(KS_ARCH_ARM64, KS_MODE_LE)
def asm(s):
    """Assemble ARM64 source `s` and return the encoded bytes.

    Raises RuntimeError when keystone produces no encoding.
    """
    encoding, _count = _ks.asm(s)
    if encoding:
        return bytes(encoding)
    raise RuntimeError(f"asm failed: {s}")
def asm_at(s, addr):
    """Assemble `s` as if placed at address `addr` (for PC-relative encodings).

    Raises RuntimeError when keystone produces no encoding.
    """
    encoding, _count = _ks.asm(s, addr=addr)
    if encoding:
        return bytes(encoding)
    raise RuntimeError(f"asm failed at 0x{addr:X}: {s}")
# Pre-assembled instruction encodings shared by the patch modules.
NOP = asm("nop")
MOV_X0_1 = asm("mov x0, #1")
RET = asm("ret")
def rd32(data, off):
    """Read a little-endian u32 from `data` at byte offset `off`."""
    (value,) = struct.unpack_from("<I", data, off)
    return value
def wr32(data, off, val):
    """Write `val` as a little-endian u32 into mutable `data` at offset `off` (in place)."""
    struct.pack_into("<I", data, off, val)
def disasm_at(data, off, n=8):
    """Disassemble up to `n` ARM64 instructions starting at file offset `off`.

    `off` is passed to capstone as the base address, so each returned
    instruction's `.address` is a file offset, not a VA.
    """
    return list(_cs.disasm(bytes(data[off : off + n * 4]), off))
def _log_asm(data, offset, count=5, marker_off=-1):
    """Log disassembly of `count` instructions at file offset for before/after comparison.

    The instruction whose file offset equals `marker_off` is tagged with '>>>'
    so the patched word stands out in the listing.
    """
    insns = disasm_at(data, offset, count)
    for insn in insns:
        tag = " >>>" if insn.address == marker_off else " "
        print(f" {tag} 0x{insn.address:08X}: {insn.mnemonic:8s} {insn.op_str}")
# ══════════════════════════════════════════════════════════════════
# Mach-O helpers
# ══════════════════════════════════════════════════════════════════
def parse_macho_sections(data):
    """Collect section metadata from a little-endian 64-bit Mach-O.

    Walks every LC_SEGMENT_64 load command and returns a dict mapping
    "segment,section" -> (vm_addr, size, file_offset).

    Raises ValueError when `data` does not start with the MH_MAGIC_64 magic.
    """
    (magic,) = struct.unpack_from("<I", data, 0)
    if magic != 0xFEEDFACF:
        raise ValueError(f"Not a 64-bit Mach-O (magic=0x{magic:X})")
    (ncmds,) = struct.unpack_from("<I", data, 16)
    sections = {}
    cursor = 32  # load commands begin right after mach_header_64
    for _ in range(ncmds):
        cmd, cmdsize = struct.unpack_from("<II", data, cursor)
        if cmd == 0x19:  # LC_SEGMENT_64
            segname = data[cursor + 8 : cursor + 24].split(b"\x00")[0].decode()
            (nsects,) = struct.unpack_from("<I", data, cursor + 64)
            entry = cursor + 72  # first section_64 follows the segment command
            for _ in range(nsects):
                sectname = data[entry : entry + 16].split(b"\x00")[0].decode()
                vm_addr, size = struct.unpack_from("<QQ", data, entry + 32)
                (file_off,) = struct.unpack_from("<I", data, entry + 48)
                sections[f"{segname},{sectname}"] = (vm_addr, size, file_off)
                entry += 80  # sizeof(section_64)
        cursor += cmdsize
    return sections
def va_to_foff(data, va):
    """Translate virtual address `va` to a file offset.

    Scans LC_SEGMENT_64 vm ranges; returns -1 when no segment maps `va`.
    """
    (ncmds,) = struct.unpack_from("<I", data, 16)
    cursor = 32
    for _ in range(ncmds):
        cmd, cmdsize = struct.unpack_from("<II", data, cursor)
        if cmd == 0x19:  # LC_SEGMENT_64
            vmaddr, vmsize, fileoff = struct.unpack_from("<QQQ", data, cursor + 24)
            if vmaddr <= va < vmaddr + vmsize:
                return fileoff + (va - vmaddr)
        cursor += cmdsize
    return -1
def find_section(sections, *candidates):
    """Return the first candidate name present in `sections`, or None."""
    return next((sections[name] for name in candidates if name in sections), None)
def find_symtab(data):
    """Locate the LC_SYMTAB load command.

    Returns (symoff, nsyms, stroff, strsize), or None when absent.
    """
    (ncmds,) = struct.unpack_from("<I", data, 16)
    cursor = 32
    for _ in range(ncmds):
        cmd, cmdsize = struct.unpack_from("<II", data, cursor)
        if cmd == 0x02:  # LC_SYMTAB
            symoff, nsyms, stroff, strsize = struct.unpack_from("<IIII", data, cursor + 8)
            return symoff, nsyms, stroff, strsize
        cursor += cmdsize
    return None
def find_symbol_va(data, name_fragment):
"""Search Mach-O symbol table for a symbol containing name_fragment.
Returns the symbol's VA, or -1 if not found.
"""
st = find_symtab(data)
if not st:
return -1
symoff, nsyms, stroff, strsize = st
for i in range(nsyms):
entry_off = symoff + i * 16 # sizeof(nlist_64)
n_strx = struct.unpack_from("<I", data, entry_off)[0]
n_value = struct.unpack_from("<Q", data, entry_off + 8)[0]
if n_strx >= strsize or n_value == 0:
continue
# Read null-terminated symbol name
end = data.index(0, stroff + n_strx)
sym_name = data[stroff + n_strx : end].decode("ascii", errors="replace")
if name_fragment in sym_name:
return n_value
return -1
# ══════════════════════════════════════════════════════════════════
# 1. seputil — Gigalocker UUID patch
# ══════════════════════════════════════════════════════════════════

View File

@@ -0,0 +1,68 @@
"""Daemon injection and cryptex path helpers."""
from .cfw_asm import *
import os
import plistlib
def parse_cryptex_paths(manifest_path):
    """Extract (SystemOS, AppOS) cryptex DMG paths from a BuildManifest.plist.

    Every BuildIdentity is inspected — vResearch IPSWs may carry the Cryptex
    entries in a non-first identity. Exits the process with status 1 when no
    identity contains both paths.
    """
    with open(manifest_path, "rb") as f:
        manifest = plistlib.load(f)
    for identity in manifest.get("BuildIdentities", []):
        entries = identity.get("Manifest", {})
        system_os = entries.get("Cryptex1,SystemOS", {}).get("Info", {}).get("Path", "")
        app_os = entries.get("Cryptex1,AppOS", {}).get("Info", {}).get("Path", "")
        if system_os and app_os:
            return system_os, app_os
    print(
        "[-] Cryptex1,SystemOS/AppOS paths not found in any BuildIdentity",
        file=sys.stderr,
    )
    sys.exit(1)
# ══════════════════════════════════════════════════════════════════
# LaunchDaemon injection
# ══════════════════════════════════════════════════════════════════
def inject_daemons(plist_path, daemon_dir):
    """Inject bash/dropbear/trollvnc/vphoned/rpcserver_ios entries into launchd.plist.

    Missing daemon plists in `daemon_dir` are logged and skipped; the target
    plist is rewritten in place with key order preserved.
    """
    # Normalize macOS binary plists to XML so plistlib round-trips cleanly.
    subprocess.run(["plutil", "-convert", "xml1", plist_path], capture_output=True)
    with open(plist_path, "rb") as f:
        target = plistlib.load(f)
    for name in ("bash", "dropbear", "trollvnc", "vphoned", "rpcserver_ios"):
        src = os.path.join(daemon_dir, f"{name}.plist")
        if not os.path.exists(src):
            print(f" [!] Missing {src}, skipping")
            continue
        with open(src, "rb") as f:
            daemon = plistlib.load(f)
        entry_key = f"/System/Library/LaunchDaemons/{name}.plist"
        target.setdefault("LaunchDaemons", {})[entry_key] = daemon
        print(f" [+] Injected {name}")
    with open(plist_path, "wb") as f:
        plistlib.dump(target, f, sort_keys=False)
# ══════════════════════════════════════════════════════════════════
# CLI
# ══════════════════════════════════════════════════════════════════

View File

@@ -0,0 +1,241 @@
"""LC_LOAD_DYLIB injection module."""
from .cfw_asm import *
def _align(n, alignment):
return (n + alignment - 1) & ~(alignment - 1)
def _find_first_section_offset(data):
"""Find the file offset of the earliest section data in the Mach-O.
This tells us how much space is available after load commands.
For fat/universal binaries, we operate on the first slice.
"""
magic = struct.unpack_from("<I", data, 0)[0]
if magic != 0xFEEDFACF:
return -1
ncmds = struct.unpack_from("<I", data, 16)[0]
offset = 32 # sizeof(mach_header_64)
earliest = len(data)
for _ in range(ncmds):
cmd, cmdsize = struct.unpack_from("<II", data, offset)
if cmd == 0x19: # LC_SEGMENT_64
nsects = struct.unpack_from("<I", data, offset + 64)[0]
sect_off = offset + 72
for _ in range(nsects):
file_off = struct.unpack_from("<I", data, sect_off + 48)[0]
size = struct.unpack_from("<Q", data, sect_off + 40)[0]
if file_off > 0 and size > 0 and file_off < earliest:
earliest = file_off
sect_off += 80
offset += cmdsize
return earliest
def _get_fat_slices(data):
"""Parse FAT (universal) binary header and return list of (offset, size) tuples.
Returns [(0, len(data))] for thin binaries.
"""
magic = struct.unpack_from(">I", data, 0)[0]
if magic == 0xCAFEBABE: # FAT_MAGIC
nfat = struct.unpack_from(">I", data, 4)[0]
slices = []
for i in range(nfat):
off = 8 + i * 20
slice_off = struct.unpack_from(">I", data, off + 8)[0]
slice_size = struct.unpack_from(">I", data, off + 12)[0]
slices.append((slice_off, slice_size))
return slices
elif magic == 0xBEBAFECA: # FAT_MAGIC_64
nfat = struct.unpack_from(">I", data, 4)[0]
slices = []
for i in range(nfat):
off = 8 + i * 32
slice_off = struct.unpack_from(">Q", data, off + 8)[0]
slice_size = struct.unpack_from(">Q", data, off + 16)[0]
slices.append((slice_off, slice_size))
return slices
else:
return [(0, len(data))]
def _check_existing_dylib(data, base, dylib_path):
"""Check if the dylib is already loaded in this Mach-O slice."""
magic = struct.unpack_from("<I", data, base)[0]
if magic != 0xFEEDFACF:
return False
ncmds = struct.unpack_from("<I", data, base + 16)[0]
offset = base + 32
for _ in range(ncmds):
cmd, cmdsize = struct.unpack_from("<II", data, offset)
if cmd in (0xC, 0xD, 0x18, 0x1F, 0x80000018):
# LC_LOAD_DYLIB, LC_LOAD_WEAK_DYLIB, LC_LAZY_LOAD_DYLIB,
# LC_REEXPORT_DYLIB, LC_LOAD_UPWARD_DYLIB
name_offset = struct.unpack_from("<I", data, offset + 8)[0]
name_end = data.index(0, offset + name_offset)
name = data[offset + name_offset : name_end].decode(
"ascii", errors="replace"
)
if name == dylib_path:
return True
offset += cmdsize
return False
def _strip_codesig(data, base):
"""Strip LC_CODE_SIGNATURE if it's the last load command.
Zeros out the command bytes and decrements ncmds/sizeofcmds.
Returns the cmdsize of the removed command, or 0 if not stripped.
Since the binary will be re-signed by ldid, this is always safe.
"""
ncmds = struct.unpack_from("<I", data, base + 16)[0]
sizeofcmds = struct.unpack_from("<I", data, base + 20)[0]
offset = base + 32
last_offset = -1
last_cmd = 0
last_cmdsize = 0
for i in range(ncmds):
cmd, cmdsize = struct.unpack_from("<II", data, offset)
if i == ncmds - 1:
last_offset = offset
last_cmd = cmd
last_cmdsize = cmdsize
offset += cmdsize
if last_cmd != 0x1D: # LC_CODE_SIGNATURE
return 0
# Zero out the LC_CODE_SIGNATURE command
data[last_offset : last_offset + last_cmdsize] = b"\x00" * last_cmdsize
# Update header
struct.pack_into("<I", data, base + 16, ncmds - 1)
struct.pack_into("<I", data, base + 20, sizeofcmds - last_cmdsize)
print(f" Stripped LC_CODE_SIGNATURE ({last_cmdsize} bytes freed)")
return last_cmdsize
def _inject_lc_load_dylib(data, base, dylib_path):
    """Inject LC_LOAD_DYLIB into a single Mach-O slice starting at `base`.

    Strategy (matches optool/insert_dylib behavior):
      1. Try to fit new LC in existing zero-padding after load commands.
      2. If not enough space, strip LC_CODE_SIGNATURE (re-signed by ldid anyway).
      3. If still not enough, allow header to overflow into section data
         (same approach as optool — the overwritten bytes are typically stub
         code that the jailbreak hook replaces).

    `data` is mutated in place. Returns True on success.
    """
    magic = struct.unpack_from("<I", data, base)[0]
    if magic != 0xFEEDFACF:
        print(f" [-] Not a 64-bit Mach-O at offset 0x{base:X}")
        return False
    ncmds = struct.unpack_from("<I", data, base + 16)[0]
    sizeofcmds = struct.unpack_from("<I", data, base + 20)[0]
    # Build the LC_LOAD_DYLIB command: 24-byte dylib_command header followed
    # by the NUL-terminated path, padded to 8-byte alignment.
    name_bytes = dylib_path.encode("ascii") + b"\x00"
    name_offset_in_cmd = 24  # sizeof(dylib_command) header
    cmd_size = _align(name_offset_in_cmd + len(name_bytes), 8)
    lc_data = bytearray(cmd_size)
    struct.pack_into("<I", lc_data, 0, 0xC)  # cmd = LC_LOAD_DYLIB
    struct.pack_into("<I", lc_data, 4, cmd_size)  # cmdsize
    struct.pack_into("<I", lc_data, 8, name_offset_in_cmd)  # name offset
    struct.pack_into("<I", lc_data, 12, 2)  # timestamp
    struct.pack_into("<I", lc_data, 16, 0)  # current_version
    struct.pack_into("<I", lc_data, 20, 0)  # compat_version
    lc_data[name_offset_in_cmd : name_offset_in_cmd + len(name_bytes)] = name_bytes
    # Check available space between the end of the load commands and the
    # earliest section data in this slice.
    header_end = base + 32 + sizeofcmds  # end of current load commands
    first_section = _find_first_section_offset(data[base:])
    if first_section < 0:
        print(f" [-] Could not determine section offsets")
        return False
    first_section_abs = base + first_section
    available = first_section_abs - header_end
    print(
        f" Header end: 0x{header_end:X}, first section: 0x{first_section_abs:X}, "
        f"available: {available}, need: {cmd_size}"
    )
    if available < cmd_size:
        # Strip LC_CODE_SIGNATURE to reclaim header space (re-signed by ldid)
        freed = _strip_codesig(data, base)
        if freed > 0:
            # Header bookkeeping changed; re-read and recompute free space.
            ncmds = struct.unpack_from("<I", data, base + 16)[0]
            sizeofcmds = struct.unpack_from("<I", data, base + 20)[0]
            header_end = base + 32 + sizeofcmds
            available = first_section_abs - header_end
            print(f" After strip: available={available}, need={cmd_size}")
    if available < cmd_size:
        overflow = cmd_size - available
        # Allow up to 256 bytes overflow (same behavior as optool/insert_dylib)
        if overflow > 256:
            print(f" [-] Would overflow {overflow} bytes into section data (too much)")
            return False
        print(
            f" [!] Header overflow: {overflow} bytes into section data "
            f"(same as optool — binary will be re-signed)"
        )
    # Write the new load command at the end of existing commands
    data[header_end : header_end + cmd_size] = lc_data
    # Update header: ncmds += 1, sizeofcmds += cmd_size
    struct.pack_into("<I", data, base + 16, ncmds + 1)
    struct.pack_into("<I", data, base + 20, sizeofcmds + cmd_size)
    return True
def inject_dylib(filepath, dylib_path):
    """Inject LC_LOAD_DYLIB for `dylib_path` into a thin or universal/FAT Mach-O.

    Equivalent to: optool install -c load -p <dylib_path> -t <filepath>

    Idempotent per slice: slices that already link the dylib count as done.
    The file is rewritten only when every slice was patched (or already done),
    so a partial failure leaves the on-disk binary untouched.
    Returns True on success.

    Fix: file handles are now closed deterministically via context managers
    (the original left `open(...)` objects to be reclaimed by GC).
    """
    with open(filepath, "rb") as f:
        data = bytearray(f.read())
    slices = _get_fat_slices(bytes(data))
    injected = 0
    for slice_off, slice_size in slices:
        if _check_existing_dylib(data, slice_off, dylib_path):
            print(f" [!] Dylib already loaded in slice at 0x{slice_off:X}, skipping")
            injected += 1
            continue
        if _inject_lc_load_dylib(data, slice_off, dylib_path):
            print(
                f" [+] Injected LC_LOAD_DYLIB '{dylib_path}' at slice 0x{slice_off:X}"
            )
            injected += 1
    if injected == len(slices):
        with open(filepath, "wb") as f:
            f.write(data)
        print(f" [+] Wrote {filepath} ({injected} slice(s) patched)")
        return True
    print(f" [-] Only {injected}/{len(slices)} slices patched")
    return False
# ══════════════════════════════════════════════════════════════════
# BuildManifest parsing
# ══════════════════════════════════════════════════════════════════

View File

@@ -0,0 +1,219 @@
"""launchd cache loader patch module."""
from .cfw_asm import *
from .cfw_asm import _log_asm
from capstone import Cs, CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN
_adrp_cs = Cs(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN)
_adrp_cs.detail = True
def patch_launchd_cache_loader(filepath):
    """NOP the cache validation check in launchd_cache_loader.

    Anchor strategy:
      Search for "unsecure_cache" substring, resolve to full null-terminated
      string start, find ADRP+ADD xref to it, NOP the nearby cbz/cbnz branch.

    The binary checks boot-arg "launchd_unsecure_cache=" — if not found,
    it skips the unsecure path via a conditional branch. NOPping that branch
    allows modified launchd.plist to be loaded.

    Returns True when a branch was patched and the file rewritten.
    """
    data = bytearray(open(filepath, "rb").read())
    sections = parse_macho_sections(data)
    text_sec = find_section(sections, "__TEXT,__text")
    if not text_sec:
        print(" [-] __TEXT,__text not found")
        return False
    text_va, text_size, text_foff = text_sec
    # Strategy 1: Search for anchor strings in __cstring
    # Code always references the START of a C string, so after finding a
    # substring match, back-scan to the enclosing string's first byte.
    cstring_sec = find_section(sections, "__TEXT,__cstring")
    # Anchors ordered most- to least-specific; first hit wins.
    anchor_strings = [
        b"unsecure_cache",
        b"unsecure",
        b"cache_valid",
        b"validation",
    ]
    for anchor_str in anchor_strings:
        anchor_off = data.find(anchor_str)
        if anchor_off < 0:
            continue
        # Find which section this byte offset belongs to and compute its VA.
        anchor_sec_foff = -1
        anchor_sec_va = -1
        for sec_name, (sva, ssz, sfoff) in sections.items():
            if sfoff <= anchor_off < sfoff + ssz:
                anchor_sec_foff = sfoff
                anchor_sec_va = sva
                break
        if anchor_sec_foff < 0:
            continue
        # Back-scan to the start of the enclosing null-terminated C string.
        # Code loads strings from their beginning, not from a substring.
        str_start_off = _find_cstring_start(data, anchor_off, anchor_sec_foff)
        str_start_va = anchor_sec_va + (str_start_off - anchor_sec_foff)
        substr_va = anchor_sec_va + (anchor_off - anchor_sec_foff)
        if str_start_off != anchor_off:
            end = data.index(0, str_start_off)
            full_str = data[str_start_off:end].decode("ascii", errors="replace")
            print(f" Found anchor '{anchor_str.decode()}' inside \"{full_str}\"")
            print(
                f" String start: va:0x{str_start_va:X} (match at va:0x{substr_va:X})"
            )
        else:
            print(f" Found anchor '{anchor_str.decode()}' at va:0x{str_start_va:X}")
        # Search __TEXT for ADRP+ADD that resolves to the string START VA
        code = bytes(data[text_foff : text_foff + text_size])
        ref_off = _find_adrp_add_ref(code, text_va, str_start_va)
        if ref_off < 0:
            # Also try the exact substring VA as fallback
            ref_off = _find_adrp_add_ref(code, text_va, substr_va)
        if ref_off < 0:
            continue
        ref_foff = text_foff + (ref_off - text_va)
        print(f" Found string ref at 0x{ref_foff:X}")
        # Find conditional branch AFTER the string ref (within +32 instructions).
        # The pattern is: ADRP+ADD (load string) -> BL (call check) -> CBZ/CBNZ (branch on result)
        # So only search forward from the ref, not backwards.
        branch_foff = _find_nearby_branch(data, ref_foff, text_foff, text_size)
        if branch_foff >= 0:
            ctx_start = max(text_foff, branch_foff - 8)
            print(f" Before:")
            _log_asm(data, ctx_start, 5, branch_foff)
            data[branch_foff : branch_foff + 4] = NOP
            print(f" After:")
            _log_asm(data, ctx_start, 5, branch_foff)
            open(filepath, "wb").write(data)
            print(f" [+] NOPped at 0x{branch_foff:X}")
            return True
    print(" [-] Dynamic anchor not found — all strategies exhausted")
    return False
def _find_cstring_start(data, match_off, section_foff):
"""Find the start of the null-terminated C string containing match_off.
Scans backwards from match_off to find the previous null byte (or section
start). Returns the file offset of the first byte of the enclosing string.
This is needed because code always references the start of a string, not
a substring within it.
"""
pos = match_off - 1
while pos >= section_foff and data[pos] != 0:
pos -= 1
return pos + 1
def _find_adrp_add_ref(code, base_va, target_va):
    """Find ADRP+ADD pair that computes target_va in code.

    Handles non-adjacent pairs: tracks recent ADRP results per register
    and matches them with ADD instructions up to 8 instructions later.

    Returns the VA of the matching ADRP instruction, or -1 when not found
    (callers convert this VA back into a file offset).
    """
    target_page = target_va & ~0xFFF
    target_pageoff = target_va & 0xFFF
    # Track recent ADRP instructions: reg -> (insn_va, page_value, instruction_index)
    adrp_cache = {}
    # Decode one word at a time so undecodable words (inline data) are skipped
    # without desynchronizing the scan.
    for off in range(0, len(code) - 4, 4):
        insns = list(_adrp_cs.disasm(code[off : off + 4], base_va + off))
        if not insns:
            continue
        insn = insns[0]
        idx = off // 4
        if insn.mnemonic == "adrp" and len(insn.operands) >= 2:
            reg = insn.operands[0].reg
            page = insn.operands[1].imm
            adrp_cache[reg] = (insn.address, page, idx)
        elif insn.mnemonic == "add" and len(insn.operands) >= 3:
            src_reg = insn.operands[1].reg
            imm = insn.operands[2].imm
            if src_reg in adrp_cache:
                adrp_va, page, adrp_idx = adrp_cache[src_reg]
                # Only match if ADRP was within 8 instructions
                if (
                    page == target_page
                    and imm == target_pageoff
                    and idx - adrp_idx <= 8
                ):
                    return adrp_va
    return -1
def _find_nearby_branch(data, ref_foff, text_foff, text_size):
    """Find a conditional branch after a BL (function call) near ref_foff.

    The typical pattern is:
        ADRP+ADD (load string argument)   <- ref_foff points here
        ... (setup other args)
        BL (call check function)
        CBZ/CBNZ (branch on return value)

    Searches forward from ref_foff for a BL, then finds the first
    conditional branch after it (within 8 instructions of the BL).
    Falls back to first conditional branch within +32 instructions.

    Returns the branch's file offset, or -1 when nothing matches.
    """
    branch_mnemonics = {"cbz", "cbnz", "tbz", "tbnz"}
    # Strategy A: find BL -> then first conditional branch after it
    for delta in range(0, 16):
        check_foff = ref_foff + delta * 4
        if check_foff >= text_foff + text_size:
            break
        insns = disasm_at(data, check_foff, 1)
        if not insns:
            continue
        if insns[0].mnemonic == "bl":
            # Found a function call; scan the next 8 instructions for a branch
            for d2 in range(1, 9):
                br_foff = check_foff + d2 * 4
                if br_foff >= text_foff + text_size:
                    break
                br_insns = disasm_at(data, br_foff, 1)
                if not br_insns:
                    continue
                mn = br_insns[0].mnemonic
                # b.* covers all condition-coded branches (b.eq, b.ne, ...)
                if mn in branch_mnemonics or mn.startswith("b."):
                    return br_foff
            break  # Found BL but no branch after it
    # Strategy B: fallback — first conditional branch forward within 32 insns
    for delta in range(1, 33):
        check_foff = ref_foff + delta * 4
        if check_foff >= text_foff + text_size:
            break
        insns = disasm_at(data, check_foff, 1)
        if not insns:
            continue
        mn = insns[0].mnemonic
        if mn in branch_mnemonics or mn.startswith("b."):
            return check_foff
    return -1
# ══════════════════════════════════════════════════════════════════
# 3. mobileactivationd — Hackivation bypass
# ══════════════════════════════════════════════════════════════════

View File

@@ -0,0 +1,155 @@
"""launchd jetsam patch module."""
from .cfw_asm import *
from .cfw_asm import _log_asm
from .cfw_patch_cache_loader import _find_adrp_add_ref, _find_cstring_start
def _extract_branch_target_off(insn):
    """Return the last immediate operand of `insn` (the branch target), or -1."""
    immediates = [op.imm for op in insn.operands if op.type == ARM64_OP_IMM]
    return immediates[-1] if immediates else -1
def _is_return_block(data, foff, text_foff, text_size):
    """True when `foff` reaches a ret/retab within 8 instructions.

    Scanning stops early at unconditional control transfers (b/bl/br/blr),
    which mark the end of the basic block.
    """
    limit = text_foff + text_size
    for step in range(8):
        cursor = foff + step * 4
        if cursor >= limit:
            return False
        decoded = disasm_at(data, cursor, 1)
        if not decoded:
            continue
        mnemonic = decoded[0].mnemonic
        if mnemonic in ("ret", "retab"):
            return True
        if mnemonic in ("b", "bl", "br", "blr"):
            return False
    return False
def patch_launchd_jetsam(filepath):
    """Bypass launchd jetsam panic path via dynamic string-xref branch rewrite.

    Anchor strategy:
      1. Find jetsam panic string in cstring-like data.
      2. Find ADRP+ADD xref to the string start in __TEXT,__text.
      3. Search backward for a conditional branch whose target is the function's
         return/success path (basic block containing ret/retab).
      4. Rewrite that conditional branch to unconditional `b <same_target>`,
         so the function always returns success and never reaches the panic.

    Returns True when a branch was rewritten and the file saved.
    """
    data = bytearray(open(filepath, "rb").read())
    sections = parse_macho_sections(data)
    text_sec = find_section(sections, "__TEXT,__text")
    if not text_sec:
        print(" [-] __TEXT,__text not found")
        return False
    text_va, text_size, text_foff = text_sec
    code = bytes(data[text_foff : text_foff + text_size])
    # All ARM64 conditional-branch mnemonics we may need to rewrite.
    cond_mnemonics = {
        "b.eq",
        "b.ne",
        "b.cs",
        "b.hs",
        "b.cc",
        "b.lo",
        "b.mi",
        "b.pl",
        "b.vs",
        "b.vc",
        "b.hi",
        "b.ls",
        "b.ge",
        "b.lt",
        "b.gt",
        "b.le",
        "cbz",
        "cbnz",
        "tbz",
        "tbnz",
    }
    # Anchors ordered most- to least-specific; first string present wins.
    anchors = [
        b"jetsam property category (Daemon) is not initialized",
        b"jetsam property category",
        b"initproc exited -- exit reason namespace 7 subcode 0x1",
    ]
    for anchor in anchors:
        hit_off = data.find(anchor)
        if hit_off < 0:
            continue
        # Map the hit's file offset to its enclosing section (for VA math).
        sec_foff = -1
        sec_va = -1
        for _, (sva, ssz, sfoff) in sections.items():
            if sfoff <= hit_off < sfoff + ssz:
                sec_foff = sfoff
                sec_va = sva
                break
        if sec_foff < 0:
            continue
        # Widen substring hit to the enclosing C string's start (xrefs point there).
        str_start_off = _find_cstring_start(data, hit_off, sec_foff)
        str_start_va = sec_va + (str_start_off - sec_foff)
        ref_va = _find_adrp_add_ref(code, text_va, str_start_va)
        if ref_va < 0:
            continue
        ref_foff = text_foff + (ref_va - text_va)
        print(f" Found jetsam anchor '{anchor.decode(errors='ignore')}'")
        print(f" string start: va:0x{str_start_va:X}")
        print(f" xref at foff:0x{ref_foff:X}")
        # Search backward from xref for conditional branches targeting
        # the function's return path (block containing ret/retab).
        # Pick the earliest (farthest back) one — it skips the most
        # jetsam-related code and matches the upstream patch strategy.
        scan_lo = max(text_foff, ref_foff - 0x300)
        patch_off = -1
        patch_target = -1
        for back in range(ref_foff - 4, scan_lo - 1, -4):
            insns = disasm_at(data, back, 1)
            if not insns:
                continue
            insn = insns[0]
            if insn.mnemonic not in cond_mnemonics:
                continue
            tgt = _extract_branch_target_off(insn)
            if tgt < 0:
                continue
            # Target must be a valid file offset within __text
            if tgt < text_foff or tgt >= text_foff + text_size:
                continue
            # Target must be a return block (contains ret/retab)
            if _is_return_block(data, tgt, text_foff, text_size):
                patch_off = back
                patch_target = tgt
                # Don't break — keep scanning for an earlier match
        if patch_off < 0:
            continue
        ctx_start = max(text_foff, patch_off - 8)
        print(f" Before:")
        _log_asm(data, ctx_start, 5, patch_off)
        # Replace the conditional branch with an unconditional one to the
        # same (return-path) target; asm_at handles the PC-relative encoding.
        data[patch_off : patch_off + 4] = asm_at(f"b #0x{patch_target:X}", patch_off)
        print(f" After:")
        _log_asm(data, ctx_start, 5, patch_off)
        open(filepath, "wb").write(data)
        print(f" [+] Patched at 0x{patch_off:X}: jetsam panic guard bypass")
        return True
    print(" [-] Dynamic jetsam anchor/xref not found")
    return False

View File

@@ -0,0 +1,162 @@
"""mobileactivationd patch module."""
from .cfw_asm import *
from .cfw_asm import _log_asm
def _find_via_objc_metadata(data):
    """Find the should_hactivate method IMP through ObjC runtime metadata.

    Chain: selector string -> __objc_selrefs pointer -> relative method-list
    entry -> IMP file offset. Returns the IMP's file offset, or -1 on any
    failure along the chain.
    """
    sections = parse_macho_sections(data)
    # Find "should_hactivate\0" string
    selector = b"should_hactivate\x00"
    sel_foff = data.find(selector)
    if sel_foff < 0:
        print(" [-] Selector 'should_hactivate' not found in binary")
        return -1
    # Compute selector VA from its enclosing section.
    sel_va = -1
    for sec_name, (sva, ssz, sfoff) in sections.items():
        if sfoff <= sel_foff < sfoff + ssz:
            sel_va = sva + (sel_foff - sfoff)
            break
    if sel_va < 0:
        print(f" [-] Could not compute VA for selector at foff:0x{sel_foff:X}")
        return -1
    print(f" Selector at foff:0x{sel_foff:X} va:0x{sel_va:X}")
    # Find selref that points to this selector (section name varies by
    # platform/toolchain, hence the candidate list).
    selrefs = find_section(
        sections,
        "__DATA_CONST,__objc_selrefs",
        "__DATA,__objc_selrefs",
        "__AUTH_CONST,__objc_selrefs",
    )
    selref_foff = -1
    selref_va = -1
    if selrefs:
        sr_va, sr_size, sr_foff = selrefs
        for i in range(0, sr_size, 8):
            ptr = struct.unpack_from("<Q", data, sr_foff + i)[0]
            # Handle chained fixups: try exact and masked match
            if ptr == sel_va or (ptr & 0x0000FFFFFFFFFFFF) == sel_va:
                selref_foff = sr_foff + i
                selref_va = sr_va + i
                break
            # Also try: lower 32 bits might encode the target in chained fixups
            if (ptr & 0xFFFFFFFF) == (sel_va & 0xFFFFFFFF):
                selref_foff = sr_foff + i
                selref_va = sr_va + i
                break
    if selref_foff < 0:
        print(" [-] Selref not found (chained fixups may obscure pointers)")
        return -1
    print(f" Selref at foff:0x{selref_foff:X} va:0x{selref_va:X}")
    # Search for relative method list entry pointing to this selref
    # Relative method entries: { int32 name_rel, int32 types_rel, int32 imp_rel }
    # name_field_va + name_rel = selref_va
    objc_const = find_section(
        sections,
        "__DATA_CONST,__objc_const",
        "__DATA,__objc_const",
        "__AUTH_CONST,__objc_const",
    )
    if objc_const:
        oc_va, oc_size, oc_foff = objc_const
        for i in range(0, oc_size - 12, 4):
            entry_foff = oc_foff + i
            entry_va = oc_va + i
            rel_name = struct.unpack_from("<i", data, entry_foff)[0]
            target_va = entry_va + rel_name
            if target_va == selref_va:
                # Found the method entry! Read IMP relative offset
                imp_field_foff = entry_foff + 8
                imp_field_va = entry_va + 8
                rel_imp = struct.unpack_from("<i", data, imp_field_foff)[0]
                imp_va = imp_field_va + rel_imp
                imp_foff = va_to_foff(bytes(data), imp_va)
                if imp_foff >= 0:
                    print(
                        f" Found via relative method list: IMP va:0x{imp_va:X} foff:0x{imp_foff:X}"
                    )
                    return imp_foff
                else:
                    # NOTE(review): on an unmappable IMP we fall through and keep
                    # scanning; confirm against upstream whether an immediate
                    # `return -1` was intended here instead.
                    print(
                        f" [!] IMP va:0x{imp_va:X} could not be mapped to file offset"
                    )
    return -1
# ══════════════════════════════════════════════════════════════════
# Patch entry point
# ══════════════════════════════════════════════════════════════════
def patch_mobileactivationd(filepath):
    """Dynamically find -[DeviceType should_hactivate] and patch it to return YES.

    Anchor strategies (in order):
      1. Search LC_SYMTAB for a symbol containing "should_hactivate"
      2. Parse ObjC metadata: methnames -> selrefs -> method_list -> IMP

    The method determines if the device should self-activate (hackivation).
    Patching it to always return YES bypasses activation lock.

    :param filepath: path to the mobileactivationd Mach-O, rewritten in place.
    :return: True when the patch was applied, False if no anchor was found.
    """
    # Context-managed read so the handle is closed even on parse errors.
    with open(filepath, "rb") as f:
        data = bytearray(f.read())
    imp_foff = -1
    # Strategy 1: Symbol table lookup (most reliable)
    imp_va = find_symbol_va(bytes(data), "should_hactivate")
    if imp_va > 0:
        imp_foff = va_to_foff(bytes(data), imp_va)
        if imp_foff >= 0:
            print(f"  Found via symtab: va:0x{imp_va:X} -> foff:0x{imp_foff:X}")
    # Strategy 2: ObjC metadata chain
    if imp_foff < 0:
        imp_foff = _find_via_objc_metadata(data)
    # All dynamic strategies exhausted
    if imp_foff < 0:
        print("  [-] Dynamic anchor not found — all strategies exhausted")
        return False
    # Bounds check: we are about to overwrite 8 bytes (two A64 instructions).
    if imp_foff + 8 > len(data):
        print(f"  [-] IMP offset 0x{imp_foff:X} out of bounds")
        return False
    print("  Before:")
    _log_asm(data, imp_foff, 4, imp_foff)
    # Patch to: mov x0, #1; ret
    data[imp_foff : imp_foff + 4] = MOV_X0_1
    data[imp_foff + 4 : imp_foff + 8] = RET
    print("  After:")
    _log_asm(data, imp_foff, 4, imp_foff)
    with open(filepath, "wb") as f:
        f.write(data)
    print(f"  [+] Patched at 0x{imp_foff:X}: mov x0, #1; ret")
    return True
# ══════════════════════════════════════════════════════════════════
# 4. launchd — Jetsam panic bypass
# ══════════════════════════════════════════════════════════════════

View File

@@ -0,0 +1,49 @@
"""seputil patch module."""
from .cfw_asm import *
def patch_seputil(filepath):
    """Dynamically find and patch the gigalocker path format string in seputil.

    Anchor: The format string "/%s.gl" used by seputil to construct the
    gigalocker file path as "{mountpoint}/{uuid}.gl".

    Patching "%s" to "AA" in "/%s.gl" makes it "/AA.gl", so the
    full path becomes /mnt7/AA.gl regardless of the device's UUID.
    The actual .gl file on disk is also renamed to AA.gl.

    :param filepath: path to the seputil Mach-O, rewritten in place.
    :return: True when the patch was applied, False if the anchor is missing.
    """
    # Context-managed read so the handle is closed even on early return.
    with open(filepath, "rb") as f:
        data = bytearray(f.read())
    # Search for the format string "/%s.gl\0" — this is the gigalocker
    # filename pattern where %s gets replaced with the device UUID.
    anchor = b"/%s.gl\x00"
    offset = data.find(anchor)
    if offset < 0:
        print(" [-] Format string '/%s.gl' not found in seputil")
        return False
    # The %s is at offset+1 (2 bytes: 0x25 0x73)
    pct_s_off = offset + 1
    original = bytes(data[offset : offset + len(anchor)])
    print(f" Found format string at 0x{offset:X}: {original!r}")
    print(f" Before: {bytes(data[offset : offset + 7]).hex(' ')}")
    # Replace %s (2 bytes) with AA — turns "/%s.gl" into "/AA.gl"
    data[pct_s_off : pct_s_off + 2] = b"AA"
    print(f" After: {bytes(data[offset : offset + 7]).hex(' ')}")
    with open(filepath, "wb") as f:
        f.write(data)
    print(f" [+] Patched at 0x{pct_s_off:X}: %s -> AA")
    print(f" /{anchor[1:-1].decode()} -> /AA.gl")
    return True
# ══════════════════════════════════════════════════════════════════
# 2. launchd_cache_loader — Unsecure cache bypass
# ══════════════════════════════════════════════════════════════════

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,81 @@
"""Shared asm/constants/helpers for kernel patchers."""
#!/usr/bin/env python3
"""
kernel_patcher.py — Dynamic kernel patcher for iOS prelinked kernelcaches.
Finds all patch sites by string anchors, ADRP+ADD cross-references,
BL frequency analysis, and Mach-O structure parsing. Nothing is hardcoded;
works across kernel variants (vresearch101, vphone600, etc.).
Dependencies: keystone-engine, capstone
"""
import struct, plistlib
from collections import defaultdict
from keystone import Ks, KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN as KS_MODE_LE
from capstone import Cs, CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN
from capstone.arm64_const import (
ARM64_OP_REG,
ARM64_OP_IMM,
ARM64_REG_W0,
ARM64_REG_X0,
ARM64_REG_X8,
)
# ── Assembly / disassembly helpers ───────────────────────────────
_ks = Ks(KS_ARCH_ARM64, KS_MODE_LE)
_cs = Cs(CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN)
_cs.detail = True
def asm(s):
    """Assemble one ARM64 instruction string and return its encoding as bytes."""
    encoding, _count = _ks.asm(s)
    if not encoding:
        raise RuntimeError(f"asm failed: {s}")
    return bytes(encoding)
# Commonly used instruction encodings, assembled once at import time.
NOP = asm("nop")
MOV_X0_0 = asm("mov x0, #0")  # 64-bit return value 0
MOV_X0_1 = asm("mov x0, #1")  # 64-bit return value 1
MOV_W0_0 = asm("mov w0, #0")  # 32-bit return value 0
MOV_W0_1 = asm("mov w0, #1")  # 32-bit return value 1
RET = asm("ret")
CMP_W0_W0 = asm("cmp w0, w0")  # compare register with itself — always sets EQ
CMP_X0_X0 = asm("cmp x0, x0")  # 64-bit variant of the always-EQ compare
def _asm_u32(s):
    """Assemble a single instruction and return its uint32 encoding."""
    (encoding,) = struct.unpack("<I", asm(s))
    return encoding
def _verify_disas(u32_val, expected_mnemonic):
    """Verify a uint32 encoding disassembles to expected mnemonic via capstone.

    Returns *u32_val* unchanged so it can be used inline in constant
    definitions. Raises AssertionError on mismatch — an explicit raise is
    used instead of a bare ``assert`` so the check still runs under
    ``python -O`` (which strips assert statements).
    """
    code = struct.pack("<I", u32_val)
    insns = list(_cs.disasm(code, 0, 1))
    if not insns or insns[0].mnemonic != expected_mnemonic:
        raise AssertionError(
            f"0x{u32_val:08X} disassembles to {insns[0].mnemonic if insns else '???'}, expected {expected_mnemonic}"
        )
    return u32_val
# Named instruction constants (via keystone where possible, capstone-verified otherwise)
_PACIBSP_U32 = _asm_u32("hint #27")  # keystone doesn't know 'pacibsp'; hint #27 is its encoding
_RET_U32 = _asm_u32("ret")
_RETAA_U32 = _verify_disas(0xD65F0BFF, "retaa")  # keystone can't assemble PAC returns
_RETAB_U32 = _verify_disas(0xD65F0FFF, "retab")  # verified via capstone disassembly
# Encodings that typically delimit functions: ret/retaa/retab end one,
# pacibsp starts the next (arm64e prologue).
_FUNC_BOUNDARY_U32S = frozenset((_RET_U32, _RETAA_U32, _RETAB_U32, _PACIBSP_U32))
def _rd32(buf, off):
return struct.unpack_from("<I", buf, off)[0]
def _rd64(buf, off):
return struct.unpack_from("<Q", buf, off)[0]
# ── KernelPatcher ────────────────────────────────────────────────

View File

@@ -0,0 +1,617 @@
"""Base class with all infrastructure for kernel patchers."""
import struct, plistlib
from collections import defaultdict
from capstone.arm64_const import (
ARM64_OP_REG,
ARM64_OP_IMM,
ARM64_REG_W0,
ARM64_REG_X0,
ARM64_REG_X8,
)
from .kernel_asm import (
_cs,
_rd32,
_rd64,
_PACIBSP_U32,
_FUNC_BOUNDARY_U32S,
)
class KernelPatcherBase:
    def __init__(self, data, verbose=False):
        """Parse the kernelcache in *data* and build all lookup indexes.

        :param data: mutable bytearray of the whole prelinked kernelcache;
            patches are written into it in place via ``emit``.
        :param verbose: when True, ``_log`` prints progress diagnostics.
        """
        self.data = data  # bytearray (mutable)
        self.raw = bytes(data)  # immutable snapshot for searching
        self.size = len(data)
        self.patches = []  # collected (offset, bytes, description)
        self.verbose = verbose
        self._patch_num = 0  # running counter for clean one-liners
        # Indexing order matters: segments -> kext ranges -> ADRP/BL
        # indexes -> panic locator (which consumes the BL index).
        self._log("[*] Parsing Mach-O segments …")
        self._parse_macho()
        self._log("[*] Discovering kext code ranges from __PRELINK_INFO …")
        self._discover_kext_ranges()
        self._log("[*] Building ADRP index …")
        self._build_adrp_index()
        self._log("[*] Building BL index …")
        self._build_bl_index()
        self._find_panic()
        self._log(
            f"[*] _panic at foff 0x{self.panic_off:X} "
            f"({len(self.bl_callers[self.panic_off])} callers)"
        )
# ── Logging ──────────────────────────────────────────────────
def _log(self, msg):
if self.verbose:
print(msg)
# ── Mach-O / segment parsing ─────────────────────────────────
def _parse_macho(self):
"""Parse top-level Mach-O: discover BASE_VA, segments, code ranges."""
magic = _rd32(self.raw, 0)
if magic != 0xFEEDFACF:
raise ValueError(f"Not a 64-bit Mach-O (magic 0x{magic:08X})")
self.code_ranges = [] # [(start_foff, end_foff), ...]
self.all_segments = [] # [(name, vmaddr, fileoff, filesize, initprot)]
self.base_va = None
ncmds = struct.unpack_from("<I", self.raw, 16)[0]
off = 32 # past mach_header_64
for _ in range(ncmds):
cmd, cmdsize = struct.unpack_from("<II", self.raw, off)
if cmd == 0x19: # LC_SEGMENT_64
segname = self.raw[off + 8 : off + 24].split(b"\x00")[0].decode()
vmaddr, vmsize, fileoff, filesize = struct.unpack_from(
"<QQQQ", self.raw, off + 24
)
initprot = struct.unpack_from("<I", self.raw, off + 60)[0]
self.all_segments.append((segname, vmaddr, fileoff, filesize, initprot))
if segname == "__TEXT":
self.base_va = vmaddr
CODE_SEGS = ("__PRELINK_TEXT", "__TEXT_EXEC", "__TEXT_BOOT_EXEC")
if segname in CODE_SEGS and filesize > 0:
self.code_ranges.append((fileoff, fileoff + filesize))
off += cmdsize
if self.base_va is None:
raise ValueError("__TEXT segment not found — cannot determine BASE_VA")
self.code_ranges.sort()
total_mb = sum(e - s for s, e in self.code_ranges) / (1024 * 1024)
self._log(f" BASE_VA = 0x{self.base_va:016X}")
self._log(
f" {len(self.code_ranges)} executable ranges, total {total_mb:.1f} MB"
)
def _va(self, foff):
return self.base_va + foff
def _foff(self, va):
return va - self.base_va
# ── Kext range discovery ─────────────────────────────────────
    def _discover_kext_ranges(self):
        """Parse __PRELINK_INFO + embedded kext Mach-Os to find code section ranges.

        Fills ``self.kext_ranges`` (tag -> (text_start, text_end) file offsets)
        for the kexts in WANTED, then derives the per-kext search ranges used
        by the patch methods. Falls back to the whole __TEXT_EXEC segment when
        __PRELINK_INFO or its plist cannot be found.
        """
        self.kext_ranges = {}  # bundle_id -> (text_start, text_end)
        # Find __PRELINK_INFO segment
        prelink_info = None
        for name, vmaddr, fileoff, filesize, _ in self.all_segments:
            if name == "__PRELINK_INFO":
                prelink_info = (fileoff, filesize)
                break
        if prelink_info is None:
            self._log(" [-] __PRELINK_INFO not found, using __TEXT_EXEC for all")
            self._set_fallback_ranges()
            return
        foff, fsize = prelink_info
        pdata = self.raw[foff : foff + fsize]
        # Parse the XML plist
        xml_start = pdata.find(b"<?xml")
        xml_end = pdata.find(b"</plist>")
        if xml_start < 0 or xml_end < 0:
            self._log(" [-] __PRELINK_INFO plist not found")
            self._set_fallback_ranges()
            return
        xml = pdata[xml_start : xml_end + len(b"</plist>")]
        pl = plistlib.loads(xml)
        items = pl.get("_PrelinkInfoDictionary", [])
        # Kexts we need ranges for
        WANTED = {
            "com.apple.filesystems.apfs": "apfs",
            "com.apple.security.sandbox": "sandbox",
            "com.apple.driver.AppleMobileFileIntegrity": "amfi",
        }
        for item in items:
            bid = item.get("CFBundleIdentifier", "")
            tag = WANTED.get(bid)
            if tag is None:
                continue
            # Mask to 64 bits — the plist value may be stored signed.
            exec_addr = item.get("_PrelinkExecutableLoadAddr", 0) & 0xFFFFFFFFFFFFFFFF
            kext_foff = exec_addr - self.base_va
            if kext_foff < 0 or kext_foff >= self.size:
                continue
            # Parse this kext's embedded Mach-O to find __TEXT_EXEC.__text
            text_range = self._parse_kext_text_exec(kext_foff)
            if text_range:
                self.kext_ranges[tag] = text_range
                self._log(
                    f" {tag:10s} __text: 0x{text_range[0]:08X} - 0x{text_range[1]:08X} "
                    f"({(text_range[1] - text_range[0]) // 1024} KB)"
                )
        # Derive the ranges used by patch methods
        self._set_ranges_from_kexts()
    def _parse_kext_text_exec(self, kext_foff):
        """Parse an embedded kext Mach-O header and return (__text start, end) in file offsets.

        :param kext_foff: file offset of the kext's mach_header_64 inside the
            kernelcache (VAs are mapped back via ``self.base_va``).
        :return: (start_foff, end_foff) of __TEXT_EXEC.__text, the whole
            __TEXT_EXEC segment when no __text section exists, or None when
            there is no valid 64-bit Mach-O at *kext_foff*.
        """
        if kext_foff + 32 > self.size:
            return None
        magic = _rd32(self.raw, kext_foff)
        if magic != 0xFEEDFACF:
            return None
        ncmds = struct.unpack_from("<I", self.raw, kext_foff + 16)[0]
        off = kext_foff + 32
        for _ in range(ncmds):
            if off + 8 > self.size:
                break
            cmd, cmdsize = struct.unpack_from("<II", self.raw, off)
            if cmd == 0x19:  # LC_SEGMENT_64
                segname = self.raw[off + 8 : off + 24].split(b"\x00")[0].decode()
                if segname == "__TEXT_EXEC":
                    vmaddr = struct.unpack_from("<Q", self.raw, off + 24)[0]
                    filesize = struct.unpack_from("<Q", self.raw, off + 48)[0]
                    nsects = struct.unpack_from("<I", self.raw, off + 64)[0]
                    # Parse sections to find __text
                    # (section_64 records are 80 bytes, starting right
                    # after the 72-byte segment command header).
                    sect_off = off + 72
                    for _ in range(nsects):
                        if sect_off + 80 > self.size:
                            break
                        sectname = (
                            self.raw[sect_off : sect_off + 16]
                            .split(b"\x00")[0]
                            .decode()
                        )
                        if sectname == "__text":
                            sect_addr = struct.unpack_from(
                                "<Q", self.raw, sect_off + 32
                            )[0]
                            sect_size = struct.unpack_from(
                                "<Q", self.raw, sect_off + 40
                            )[0]
                            sect_foff = sect_addr - self.base_va
                            return (sect_foff, sect_foff + sect_size)
                        sect_off += 80
                    # No __text section found, use the segment
                    seg_foff = vmaddr - self.base_va
                    return (seg_foff, seg_foff + filesize)
            off += cmdsize
        return None
def _set_ranges_from_kexts(self):
"""Set patch-method ranges from discovered kext info, with fallbacks."""
# Full __TEXT_EXEC range
text_exec = None
for name, vmaddr, fileoff, filesize, _ in self.all_segments:
if name == "__TEXT_EXEC":
text_exec = (fileoff, fileoff + filesize)
break
if text_exec is None:
text_exec = (0, self.size)
self.text_exec_range = text_exec
self.apfs_text = self.kext_ranges.get("apfs", text_exec)
self.amfi_text = self.kext_ranges.get("amfi", text_exec)
self.sandbox_text = self.kext_ranges.get("sandbox", text_exec)
# Kernel code = full __TEXT_EXEC (includes all kexts, but that's OK)
self.kern_text = text_exec
def _set_fallback_ranges(self):
"""Use __TEXT_EXEC for everything when __PRELINK_INFO is unavailable."""
text_exec = None
for name, vmaddr, fileoff, filesize, _ in self.all_segments:
if name == "__TEXT_EXEC":
text_exec = (fileoff, fileoff + filesize)
break
if text_exec is None:
text_exec = (0, self.size)
self.text_exec_range = text_exec
self.apfs_text = text_exec
self.amfi_text = text_exec
self.sandbox_text = text_exec
self.kern_text = text_exec
# ── Index builders ───────────────────────────────────────────
def _build_adrp_index(self):
"""Index ADRP instructions by target page for O(1) string-ref lookup."""
self.adrp_by_page = defaultdict(list)
for rng_start, rng_end in self.code_ranges:
for off in range(rng_start, rng_end, 4):
insn = _rd32(self.raw, off)
if (insn & 0x9F000000) != 0x90000000:
continue
rd = insn & 0x1F
immhi = (insn >> 5) & 0x7FFFF
immlo = (insn >> 29) & 0x3
imm = (immhi << 2) | immlo
if imm & (1 << 20):
imm -= 1 << 21
pc = self._va(off)
page = (pc & ~0xFFF) + (imm << 12)
self.adrp_by_page[page].append((off, rd))
n = sum(len(v) for v in self.adrp_by_page.values())
self._log(f" {n} ADRP entries, {len(self.adrp_by_page)} distinct pages")
def _build_bl_index(self):
"""Index BL instructions by target offset."""
self.bl_callers = defaultdict(list) # target_off -> [caller_off, ...]
for rng_start, rng_end in self.code_ranges:
for off in range(rng_start, rng_end, 4):
insn = _rd32(self.raw, off)
if (insn & 0xFC000000) != 0x94000000:
continue
imm26 = insn & 0x3FFFFFF
if imm26 & (1 << 25):
imm26 -= 1 << 26
target = off + imm26 * 4
self.bl_callers[target].append(off)
    def _find_panic(self):
        """Find _panic: most-called function whose callers reference '@%s:%d' strings.

        Heuristic: take the 15 most-called BL targets; for each with at least
        2000 callers, sample 30 call sites and look (up to 8 instructions
        back) for an ADRP x0 / ADD x0 pair materializing a string containing
        the panic format marker '@%s:%d'. Three confirmations settle it.
        Falls back to the 3rd-most-called target when nothing confirms.
        Sets ``self.panic_off``.
        """
        candidates = sorted(self.bl_callers.items(), key=lambda x: -len(x[1]))[:15]
        for target_off, callers in candidates:
            if len(callers) < 2000:
                break
            confirmed = 0
            for caller_off in callers[:30]:
                for back in range(caller_off - 4, max(caller_off - 32, 0), -4):
                    insn = _rd32(self.raw, back)
                    # ADD x0, x0, #imm
                    if (insn & 0xFFC003E0) == 0x91000000:
                        add_imm = (insn >> 10) & 0xFFF
                        if back >= 4:
                            prev = _rd32(self.raw, back - 4)
                            if (prev & 0x9F00001F) == 0x90000000:  # ADRP x0
                                immhi = (prev >> 5) & 0x7FFFF
                                immlo = (prev >> 29) & 0x3
                                imm = (immhi << 2) | immlo
                                if imm & (1 << 20):
                                    imm -= 1 << 21
                                pc = self._va(back - 4)
                                page = (pc & ~0xFFF) + (imm << 12)
                                str_foff = self._foff(page + add_imm)
                                if 0 <= str_foff < self.size - 10:
                                    snippet = self.raw[str_foff : str_foff + 60]
                                    if b"@%s:%d" in snippet or b"%s:%d" in snippet:
                                        confirmed += 1
                                        break
                        # An ADD x0 was seen; stop scanning further back.
                        break
            if confirmed >= 3:
                self.panic_off = target_off
                return
        # Fallback: 3rd-most-called target (first two are often other hot helpers).
        self.panic_off = candidates[2][0] if len(candidates) > 2 else candidates[0][0]
# ── Helpers ──────────────────────────────────────────────────
def _disas_at(self, off, count=1):
"""Disassemble *count* instructions at file offset. Returns a list."""
end = min(off + count * 4, self.size)
if off < 0 or off >= self.size:
return []
code = bytes(self.raw[off:end])
return list(_cs.disasm(code, off, count))
def _is_bl(self, off):
"""Return BL target file offset, or -1 if not a BL."""
insns = self._disas_at(off)
if insns and insns[0].mnemonic == "bl":
return insns[0].operands[0].imm
return -1
def _is_cond_branch_w0(self, off):
"""Return True if instruction is a conditional branch on w0 (cbz/cbnz/tbz/tbnz)."""
insns = self._disas_at(off)
if not insns:
return False
i = insns[0]
if i.mnemonic in ("cbz", "cbnz", "tbz", "tbnz"):
return (
i.operands[0].type == ARM64_OP_REG and i.operands[0].reg == ARM64_REG_W0
)
return False
def find_string(self, s, start=0):
"""Find string, return file offset of the enclosing C string start."""
if isinstance(s, str):
s = s.encode()
off = self.raw.find(s, start)
if off < 0:
return -1
# Walk backward to the preceding NUL — that's the C string start
cstr = off
while cstr > 0 and self.raw[cstr - 1] != 0:
cstr -= 1
return cstr
def find_string_refs(self, str_off, code_start=None, code_end=None):
"""Find all (adrp_off, add_off, dest_reg) referencing str_off via ADRP+ADD."""
target_va = self._va(str_off)
target_page = target_va & ~0xFFF
page_off = target_va & 0xFFF
refs = []
for adrp_off, rd in self.adrp_by_page.get(target_page, []):
if code_start is not None and adrp_off < code_start:
continue
if code_end is not None and adrp_off >= code_end:
continue
if adrp_off + 4 >= self.size:
continue
nxt = _rd32(self.raw, adrp_off + 4)
# ADD (imm) 64-bit: 1001_0001_00_imm12_Rn_Rd
if (nxt & 0xFFC00000) != 0x91000000:
continue
add_rn = (nxt >> 5) & 0x1F
add_imm = (nxt >> 10) & 0xFFF
if add_rn == rd and add_imm == page_off:
add_rd = nxt & 0x1F
refs.append((adrp_off, adrp_off + 4, add_rd))
return refs
    def find_function_start(self, off, max_back=0x4000):
        """Walk backwards to find PACIBSP or STP x29,x30,[sp,#imm].
        When STP x29,x30 is found, continues backward up to 0x20 more
        bytes to look for PACIBSP (ARM64e functions may have several STP
        instructions in the prologue before STP x29,x30).

        :param off: file offset somewhere inside the function body.
        :param max_back: maximum distance in bytes to scan backwards.
        :return: file offset of the prologue start, or -1 when none found.
        """
        for o in range(off - 4, max(off - max_back, 0), -4):
            insn = _rd32(self.raw, o)
            if insn == _PACIBSP_U32:
                return o
            dis = self._disas_at(o)
            if dis and dis[0].mnemonic == "stp" and "x29, x30, [sp" in dis[0].op_str:
                # Check further back for PACIBSP (prologue may have
                # multiple STP instructions before x29,x30)
                for k in range(o - 4, max(o - 0x24, 0), -4):
                    if _rd32(self.raw, k) == _PACIBSP_U32:
                        return k
                return o
        return -1
def _disas_n(self, buf, off, count):
"""Disassemble *count* instructions from *buf* at file offset *off*."""
end = min(off + count * 4, len(buf))
if off < 0 or off >= len(buf):
return []
code = bytes(buf[off:end])
return list(_cs.disasm(code, off, count))
def _fmt_insn(self, insn, marker=""):
"""Format one capstone instruction for display."""
raw = insn.bytes
hex_str = " ".join(f"{b:02x}" for b in raw)
s = f" 0x{insn.address:08X}: {hex_str:12s} {insn.mnemonic:8s} {insn.op_str}"
if marker:
s += f" {marker}"
return s
    def _print_patch_context(self, off, patch_bytes, desc):
        """Print disassembly before/after a patch site for debugging.

        NOTE: context is disassembled from ``self.raw`` — the immutable
        pre-patch snapshot taken in ``__init__`` — so the AFTER view is
        reconstructed: original leading context, then the freshly decoded
        patch bytes, then original trailing context.
        """
        ctx = 3  # instructions of context before and after
        # -- BEFORE (original bytes) --
        lines = [f" ┌─ PATCH 0x{off:08X}: {desc}"]
        lines.append(" │ BEFORE:")
        start = max(off - ctx * 4, 0)
        before_insns = self._disas_n(self.raw, start, ctx + 1 + ctx)
        for insn in before_insns:
            if insn.address == off:
                lines.append(self._fmt_insn(insn, " ◄━━ PATCHED"))
            elif off < insn.address < off + len(patch_bytes):
                # Instructions fully covered by the patch span.
                lines.append(self._fmt_insn(insn, " ◄━━ PATCHED"))
            else:
                lines.append(self._fmt_insn(insn))
        # -- AFTER (new bytes) --
        lines.append(" │ AFTER:")
        after_insns = self._disas_n(self.raw, start, ctx)
        for insn in after_insns:
            lines.append(self._fmt_insn(insn))
        # Decode the patch bytes themselves
        patch_insns = list(_cs.disasm(patch_bytes, off, len(patch_bytes) // 4))
        for insn in patch_insns:
            lines.append(self._fmt_insn(insn, " ◄━━ NEW"))
        # Trailing context after the patch
        trail_start = off + len(patch_bytes)
        trail_insns = self._disas_n(self.raw, trail_start, ctx)
        for insn in trail_insns:
            lines.append(self._fmt_insn(insn))
        lines.append(f" └─")
        self._log("\n".join(lines))
def emit(self, off, patch_bytes, desc):
"""Record a patch and apply it to self.data immediately.
Writing through to self.data ensures _find_code_cave() sees
previously allocated shellcode and won't reuse the same cave.
"""
self.patches.append((off, patch_bytes, desc))
self.data[off : off + len(patch_bytes)] = patch_bytes
self._patch_num += 1
print(f" [{self._patch_num:2d}] 0x{off:08X} {desc}")
if self.verbose:
self._print_patch_context(off, patch_bytes, desc)
def _find_by_string_in_range(self, string, code_range, label):
"""Find string, find ADRP+ADD ref in code_range, return ref list."""
str_off = self.find_string(string)
if str_off < 0:
self._log(f" [-] string not found: {string!r}")
return []
refs = self.find_string_refs(str_off, code_range[0], code_range[1])
if not refs:
self._log(f" [-] no code refs to {label} (str at 0x{str_off:X})")
return refs
# ── Chained fixup pointer decoding ───────────────────────────
def _decode_chained_ptr(self, val):
"""Decode an arm64e chained fixup pointer to a file offset.
- auth rebase (bit63=1): foff = bits[31:0]
- non-auth rebase (bit63=0): VA = (bits[50:43] << 56) | bits[42:0]
"""
if val == 0:
return -1
if val & (1 << 63): # auth rebase
return val & 0xFFFFFFFF
else: # non-auth rebase
target = val & 0x7FFFFFFFFFF # bits[42:0]
high8 = (val >> 43) & 0xFF
full_va = (high8 << 56) | target
if full_va > self.base_va:
return full_va - self.base_va
return -1
# ═══════════════════════════════════════════════════════════════
# Per-patch finders
# ═══════════════════════════════════════════════════════════════
_COND_BRANCH_MNEMONICS = frozenset(
(
"b.eq",
"b.ne",
"b.cs",
"b.hs",
"b.cc",
"b.lo",
"b.mi",
"b.pl",
"b.vs",
"b.vc",
"b.hi",
"b.ls",
"b.ge",
"b.lt",
"b.gt",
"b.le",
"b.al",
"cbz",
"cbnz",
"tbz",
"tbnz",
)
)
def _decode_branch_target(self, off):
"""Decode conditional branch at off via capstone. Returns (target, mnemonic) or (None, None)."""
insns = self._disas_at(off)
if not insns:
return None, None
i = insns[0]
if i.mnemonic in self._COND_BRANCH_MNEMONICS:
# Target is always the last IMM operand
for op in reversed(i.operands):
if op.type == ARM64_OP_IMM:
return op.imm, i.mnemonic
return None, None
def _get_kernel_text_range(self):
"""Return (start, end) file offsets of the kernel's own __TEXT_EXEC.__text.
Parses fileset entries (LC_FILESET_ENTRY) to find the kernel component,
then reads its Mach-O header to get the __TEXT_EXEC.__text section.
Falls back to the full __TEXT_EXEC segment.
"""
# Try fileset entries
ncmds = struct.unpack_from("<I", self.raw, 16)[0]
off = 32
for _ in range(ncmds):
cmd, cmdsize = struct.unpack_from("<II", self.raw, off)
if cmd == 0x80000035: # LC_FILESET_ENTRY
vmaddr = struct.unpack_from("<Q", self.raw, off + 8)[0]
str_off_in_cmd = struct.unpack_from("<I", self.raw, off + 24)[0]
entry_id = self.raw[off + str_off_in_cmd :].split(b"\x00")[0].decode()
if entry_id == "com.apple.kernel":
kext_foff = vmaddr - self.base_va
text_range = self._parse_kext_text_exec(kext_foff)
if text_range:
return text_range
off += cmdsize
return self.kern_text
    @staticmethod
    def _is_func_boundary(insn):
        """Return True if *insn* typically ends/starts a function.

        *insn* is a raw uint32 encoding; the boundary set (ret/retaa/retab
        and pacibsp) is defined in kernel_asm as _FUNC_BOUNDARY_U32S.
        """
        return insn in _FUNC_BOUNDARY_U32S
    def _find_sandbox_ops_table_via_conf(self):
        """Find Sandbox mac_policy_ops table via mac_policy_conf struct.

        Locates the "Sandbox" and "Seatbelt sandbox policy" C strings, then
        scans __DATA_CONST/__DATA for a struct whose first two pointer slots
        reference them back-to-back (matching the mpc_name / mpc_fullname
        layout of struct mac_policy_conf); the pointer at +32 is then taken
        as mpc_ops. Pointers are non-auth chained fixups (bit63 clear),
        decoded via their low 43 bits.
        :return: file offset of the ops table, or None when not found.
        """
        self._log("\n[*] Finding Sandbox mac_policy_ops via mac_policy_conf...")
        seatbelt_off = self.find_string(b"Seatbelt sandbox policy")
        # Require NUL on both sides so we match the exact string "Sandbox".
        sandbox_raw = self.raw.find(b"\x00Sandbox\x00")
        sandbox_off = sandbox_raw + 1 if sandbox_raw >= 0 else -1
        if seatbelt_off < 0 or sandbox_off < 0:
            self._log(" [-] Sandbox/Seatbelt strings not found")
            return None
        self._log(
            f" [*] Sandbox string at foff 0x{sandbox_off:X}, "
            f"Seatbelt at 0x{seatbelt_off:X}"
        )
        data_ranges = []
        for name, vmaddr, fileoff, filesize, prot in self.all_segments:
            if name in ("__DATA_CONST", "__DATA") and filesize > 0:
                data_ranges.append((fileoff, fileoff + filesize))
        for d_start, d_end in data_ranges:
            for i in range(d_start, d_end - 40, 8):
                val = _rd64(self.raw, i)
                if val == 0 or (val & (1 << 63)):
                    continue
                if (val & 0x7FFFFFFFFFF) != sandbox_off:
                    continue
                val2 = _rd64(self.raw, i + 8)
                if (val2 & (1 << 63)) or (val2 & 0x7FFFFFFFFFF) != seatbelt_off:
                    continue
                val_ops = _rd64(self.raw, i + 32)
                if not (val_ops & (1 << 63)):
                    ops_off = val_ops & 0x7FFFFFFFFFF
                    self._log(
                        f" [+] mac_policy_conf at foff 0x{i:X}, "
                        f"mpc_ops -> 0x{ops_off:X}"
                    )
                    return ops_off
        self._log(" [-] mac_policy_conf not found")
        return None
def _read_ops_entry(self, table_off, index):
"""Read a function pointer from the ops table, handling chained fixups."""
off = table_off + index * 8
if off + 8 > self.size:
return -1
val = _rd64(self.raw, off)
if val == 0:
return 0
return self._decode_chained_ptr(val)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,273 @@
"""kernel_jb_base.py — JB base class with infrastructure methods."""
import struct
from collections import Counter
from capstone.arm64_const import (
ARM64_OP_REG,
ARM64_OP_IMM,
ARM64_OP_MEM,
ARM64_REG_X0,
ARM64_REG_X1,
ARM64_REG_W0,
ARM64_REG_X8,
)
from .kernel import (
KernelPatcher,
NOP,
MOV_X0_0,
MOV_X0_1,
MOV_W0_0,
MOV_W0_1,
CMP_W0_W0,
CMP_X0_X0,
RET,
asm,
_rd32,
_rd64,
)
# Instruction encodings shared by the JB patch mixins.
CBZ_X2_8 = asm("cbz x2, #8")  # branch +8 (over one instruction) when x2 == 0
STR_X0_X2 = asm("str x0, [x2]")  # store x0 through the pointer in x2
CMP_XZR_XZR = asm("cmp xzr, xzr")  # compare zero with zero — always sets EQ
MOV_X8_XZR = asm("mov x8, xzr")  # zero out x8
class KernelJBPatcherBase(KernelPatcher):
    """Jailbreak patcher base: extends KernelPatcher with a best-effort
    symbol table, a code-cave allocator, branch encoders, and
    function-location helpers shared by the JB patch mixins."""

    def __init__(self, data, verbose=False):
        # Run the full KernelPatcher pipeline first, then resolve symbols.
        super().__init__(data, verbose)
        self._build_symbol_table()

    # ── Symbol table (best-effort, may find 0 on stripped kernels) ──
    def _build_symbol_table(self):
        """Parse nlist entries from LC_SYMTAB to build symbol→foff map.

        Scans the top-level LC_SYMTAB first, then each LC_FILESET_ENTRY's
        embedded Mach-O. Populates ``self.symbols`` (name -> file offset).
        """
        self.symbols = {}
        # Parse top-level LC_SYMTAB
        ncmds = struct.unpack_from("<I", self.raw, 16)[0]
        off = 32
        for _ in range(ncmds):
            if off + 8 > self.size:
                break
            cmd, cmdsize = struct.unpack_from("<II", self.raw, off)
            if cmd == 0x2:  # LC_SYMTAB
                symoff = struct.unpack_from("<I", self.raw, off + 8)[0]
                nsyms = struct.unpack_from("<I", self.raw, off + 12)[0]
                stroff = struct.unpack_from("<I", self.raw, off + 16)[0]
                self._parse_nlist(symoff, nsyms, stroff)
            off += cmdsize
        # Parse fileset entries' LC_SYMTAB
        off = 32
        for _ in range(ncmds):
            if off + 8 > self.size:
                break
            cmd, cmdsize = struct.unpack_from("<II", self.raw, off)
            if cmd == 0x80000035:  # LC_FILESET_ENTRY
                # fileoff is at off+16
                foff_entry = struct.unpack_from("<Q", self.raw, off + 16)[0]
                self._parse_fileset_symtab(foff_entry)
            off += cmdsize
        self._log(f"[*] Symbol table: {len(self.symbols)} symbols resolved")

    def _parse_fileset_symtab(self, mh_off):
        """Parse LC_SYMTAB from a fileset entry Mach-O.

        :param mh_off: file offset of the entry's mach_header_64; silently
            ignored when out of bounds or not a 64-bit Mach-O.
        """
        if mh_off < 0 or mh_off + 32 > self.size:
            return
        magic = _rd32(self.raw, mh_off)
        if magic != 0xFEEDFACF:
            return
        ncmds = struct.unpack_from("<I", self.raw, mh_off + 16)[0]
        off = mh_off + 32
        for _ in range(ncmds):
            if off + 8 > self.size:
                break
            cmd, cmdsize = struct.unpack_from("<II", self.raw, off)
            if cmd == 0x2:  # LC_SYMTAB
                symoff = struct.unpack_from("<I", self.raw, off + 8)[0]
                nsyms = struct.unpack_from("<I", self.raw, off + 12)[0]
                stroff = struct.unpack_from("<I", self.raw, off + 16)[0]
                self._parse_nlist(symoff, nsyms, stroff)
            off += cmdsize

    def _parse_nlist(self, symoff, nsyms, stroff):
        """Parse nlist64 entries: add defined function symbols to self.symbols."""
        for i in range(nsyms):
            entry_off = symoff + i * 16  # sizeof(struct nlist_64) == 16
            if entry_off + 16 > self.size:
                break
            n_strx, n_type, n_sect, n_desc, n_value = struct.unpack_from(
                "<IBBHQ", self.raw, entry_off
            )
            # Keep only section-defined symbols (N_TYPE == N_SECT).
            if n_type & 0x0E != 0x0E:
                continue
            if n_value == 0:
                continue
            name_off = stroff + n_strx
            if name_off >= self.size:
                continue
            name_end = self.raw.find(b"\x00", name_off)
            # Reject unterminated or implausibly long (>512 byte) names.
            if name_end < 0 or name_end - name_off > 512:
                continue
            name = self.raw[name_off:name_end].decode("ascii", errors="replace")
            foff = n_value - self.base_va
            if 0 <= foff < self.size:
                self.symbols[name] = foff

    def _resolve_symbol(self, name):
        """Look up a function symbol, return file offset or -1."""
        return self.symbols.get(name, -1)

    # ── Code cave finder ──────────────────────────────────────────
    def _find_code_cave(self, size, align=4):
        """Find a region of zeros/0xFF/UDF in executable memory for shellcode.
        Returns file offset of the cave start, or -1 if not found.
        Reads from self.data (mutable) so previously allocated caves are skipped.
        """
        # Round the request up to the alignment granule.
        needed = (size + align - 1) // align * align
        for rng_start, rng_end in self.code_ranges:
            run_start = -1
            run_len = 0
            for off in range(rng_start, rng_end, 4):
                val = _rd32(self.data, off)
                # Filler words accepted as cave space: all-zeros, all-ones,
                # or 0xD4200000 (the BRK #0 encoding).
                if val == 0x00000000 or val == 0xFFFFFFFF or val == 0xD4200000:
                    if run_start < 0:
                        run_start = off
                        run_len = 4
                    else:
                        run_len += 4
                    if run_len >= needed:
                        return run_start
                else:
                    run_start = -1
                    run_len = 0
        return -1

    # ── Branch encoding helpers ───────────────────────────────────
    def _encode_b(self, from_off, to_off):
        """Encode an unconditional B instruction.

        :return: 4 bytes, or None when the ±128 MB imm26 range is exceeded.
        """
        delta = (to_off - from_off) // 4
        if delta < -(1 << 25) or delta >= (1 << 25):
            return None
        return struct.pack("<I", 0x14000000 | (delta & 0x3FFFFFF))

    def _encode_bl(self, from_off, to_off):
        """Encode a BL instruction.

        :return: 4 bytes, or None when the ±128 MB imm26 range is exceeded.
        """
        delta = (to_off - from_off) // 4
        if delta < -(1 << 25) or delta >= (1 << 25):
            return None
        return struct.pack("<I", 0x94000000 | (delta & 0x3FFFFFF))

    # ── Function finding helpers ──────────────────────────────────
    def _find_func_end(self, func_start, max_size=0x4000):
        """Find the end of a function (next PACIBSP or limit)."""
        limit = min(func_start + max_size, self.size)
        for off in range(func_start + 4, limit, 4):
            d = self._disas_at(off)
            if d and d[0].mnemonic == "pacibsp":
                return off
        return limit

    def _find_bl_to_panic_in_range(self, start, end):
        """Find first BL to _panic in range, return offset or -1."""
        for off in range(start, end, 4):
            bl_target = self._is_bl(off)
            if bl_target == self.panic_off:
                return off
        return -1

    def _find_func_by_string(self, string, code_range=None):
        """Find a function that references a given string.
        Returns the function start (PACIBSP), or -1.
        """
        str_off = self.find_string(string)
        if str_off < 0:
            return -1
        if code_range:
            refs = self.find_string_refs(str_off, *code_range)
        else:
            refs = self.find_string_refs(str_off)
        if not refs:
            return -1
        # Only the first reference is used to locate the enclosing function.
        func_start = self.find_function_start(refs[0][0])
        return func_start

    def _find_func_containing_string(self, string, code_range=None):
        """Find a function containing a string reference.
        Returns (func_start, func_end, refs) or (None, None, None).
        """
        str_off = self.find_string(string)
        if str_off < 0:
            return None, None, None
        if code_range:
            refs = self.find_string_refs(str_off, *code_range)
        else:
            refs = self.find_string_refs(str_off)
        if not refs:
            return None, None, None
        func_start = self.find_function_start(refs[0][0])
        if func_start < 0:
            return None, None, None
        func_end = self._find_func_end(func_start)
        return func_start, func_end, refs

    def _find_nosys(self):
        """Find _nosys: a tiny function that returns ENOSYS (78 = 0x4e).
        Pattern: mov w0, #0x4e; ret (or with PACIBSP wrapper).
        """
        # Search for: mov w0, #0x4e (= 0x528009C0) followed by ret (= 0xD65F03C0)
        mov_w0_4e = struct.unpack("<I", asm("mov w0, #0x4e"))[0]
        ret_val = struct.unpack("<I", RET)[0]
        for s, e in self.code_ranges:
            for off in range(s, e - 4, 4):
                v0 = _rd32(self.raw, off)
                v1 = _rd32(self.raw, off + 4)
                if v0 == mov_w0_4e and v1 == ret_val:
                    return off
                # Also check with PACIBSP prefix
                # (0xD503237F == pacibsp / hint #27 encoding).
                if v0 == 0xD503237F and v1 == mov_w0_4e:
                    v2 = _rd32(self.raw, off + 8)
                    if v2 == ret_val:
                        return off
        return -1
# ══════════════════════════════════════════════════════════════
# Module exports
# ══════════════════════════════════════════════════════════════
# Re-export for patch mixins
# Everything a `from .kernel_jb_base import *` in a patch-mixin module
# needs: the base class, shared asm constants/helpers, and the capstone
# operand/register constants.
__all__ = [
    "KernelJBPatcherBase",
    "CBZ_X2_8",
    "STR_X0_X2",
    "CMP_XZR_XZR",
    "MOV_X8_XZR",
    "NOP",
    "MOV_X0_0",
    "MOV_X0_1",
    "MOV_W0_0",
    "MOV_W0_1",
    "CMP_W0_W0",
    "CMP_X0_X0",
    "RET",
    "asm",
    "_rd32",
    "_rd64",
    "struct",
    "Counter",
    "ARM64_OP_REG",
    "ARM64_OP_IMM",
    "ARM64_OP_MEM",
    "ARM64_REG_X0",
    "ARM64_REG_X1",
    "ARM64_REG_W0",
    "ARM64_REG_X8",
]

View File

@@ -0,0 +1,67 @@
"""Mixin: KernelJBPatchAmfiExecveMixin."""
from .kernel_jb_base import MOV_X0_0
class KernelJBPatchAmfiExecveMixin:
    """Mixin for KernelJBPatcherBase: neutralizes AMFI's execve kill path."""

    def patch_amfi_execve_kill_path(self):
        """Bypass AMFI execve kill helpers (string xref -> function local pair).

        Strategy: locate the "execve() killing" log string, walk each
        function that references it, and inside the first 0x120 bytes of
        the function expect exactly two `bl <check>` instructions each
        followed by a cbz/cbnz on w0. Replacing both BLs with `mov x0, #0`
        makes the following w0 branch always see zero.
        :return: True when both sites were patched, False otherwise.
        """
        self._log("\n[JB] AMFI execve kill path: BL -> mov x0,#0 (2 sites)")
        str_off = self.find_string(b"AMFI: hook..execve() killing")
        if str_off < 0:
            # Fall back to the shorter suffix in case the prefix changed.
            str_off = self.find_string(b"execve() killing")
        if str_off < 0:
            self._log(" [-] execve kill log string not found")
            return False
        refs = self.find_string_refs(str_off, *self.kern_text)
        if not refs:
            refs = self.find_string_refs(str_off)
        if not refs:
            self._log(" [-] no refs to execve kill log string")
            return False
        patched = False
        seen_funcs = set()
        for adrp_off, _, _ in refs:
            func_start = self.find_function_start(adrp_off)
            if func_start < 0 or func_start in seen_funcs:
                continue
            seen_funcs.add(func_start)
            # Bound the function: next pacibsp, capped at +0x800.
            func_end = min(func_start + 0x800, self.kern_text[1])
            for p in range(func_start + 4, func_end, 4):
                d = self._disas_at(p)
                if d and d[0].mnemonic == "pacibsp":
                    func_end = p
                    break
            early_window_end = min(func_start + 0x120, func_end)
            hits = []
            for off in range(func_start, early_window_end - 4, 4):
                d0 = self._disas_at(off)
                d1 = self._disas_at(off + 4)
                if not d0 or not d1:
                    continue
                i0, i1 = d0[0], d1[0]
                if i0.mnemonic != "bl":
                    continue
                # Record BLs whose result is immediately tested via w0.
                if i1.mnemonic in ("cbz", "cbnz") and i1.op_str.startswith("w0,"):
                    hits.append(off)
            if len(hits) != 2:
                self._log(
                    f" [-] execve helper at 0x{func_start:X}: "
                    f"expected 2 early BL+W0-branch sites, found {len(hits)}"
                )
                continue
            self.emit(hits[0], MOV_X0_0, "mov x0,#0 [AMFI execve helper A]")
            self.emit(hits[1], MOV_X0_0, "mov x0,#0 [AMFI execve helper B]")
            patched = True
            break
        if not patched:
            self._log(" [-] AMFI execve helper patch sites not found")
        return patched

View File

@@ -0,0 +1,89 @@
"""Mixin: KernelJBPatchAmfiTrustcacheMixin."""
from .kernel_jb_base import MOV_X0_1, CBZ_X2_8, STR_X0_X2, RET
class KernelJBPatchAmfiTrustcacheMixin:
    """Mixin: rewrite AMFIIsCDHashInTrustCache to always report a hit."""

    def patch_amfi_cdhash_in_trustcache(self):
        """AMFIIsCDHashInTrustCache rewrite (semantic function matching).

        Scans every PACIBSP-prologue function in the AMFI text range for the
        ordered instruction signature of AMFIIsCDHashInTrustCache:
        mov x19,x2 → stp xzr,xzr,[sp…] → mov x2,sp → bl → mov x20,x0 →
        cbnz w0 → cbz x19. Requires exactly one match, then rewrites the
        function head to:
            mov x0,#1; cbz x2,+8; str x0,[x2]; ret
        i.e. "hash is trusted", and when the out-flag pointer (x2) is
        non-NULL, store the flag before returning.
        """
        self._log("\n[JB] AMFIIsCDHashInTrustCache: always allow + store flag")

        def _find_after(insns, start, pred):
            # Index of the first instruction at/after `start` matching
            # `pred`, or -1 — enforces the in-order signature below.
            for idx in range(start, len(insns)):
                if pred(insns[idx]):
                    return idx
            return -1

        hits = []
        s, e = self.amfi_text
        for off in range(s, e - 4, 4):
            d0 = self._disas_at(off)
            if not d0 or d0[0].mnemonic != "pacibsp":
                continue
            # Bound the candidate function: next PACIBSP or +0x200.
            func_end = min(off + 0x200, e)
            for p in range(off + 4, func_end, 4):
                dp = self._disas_at(p)
                if dp and dp[0].mnemonic == "pacibsp":
                    func_end = p
                    break
            # Decode the whole candidate body once; stop at the first
            # undecodable word.
            insns = []
            for p in range(off, func_end, 4):
                d = self._disas_at(p)
                if not d:
                    break
                insns.append(d[0])
            i1 = _find_after(
                insns, 0, lambda x: x.mnemonic == "mov" and x.op_str == "x19, x2"
            )
            if i1 < 0:
                continue
            i2 = _find_after(
                insns,
                i1 + 1,
                lambda x: x.mnemonic == "stp" and x.op_str.startswith("xzr, xzr, [sp"),
            )
            if i2 < 0:
                continue
            i3 = _find_after(
                insns, i2 + 1, lambda x: x.mnemonic == "mov" and x.op_str == "x2, sp"
            )
            if i3 < 0:
                continue
            i4 = _find_after(insns, i3 + 1, lambda x: x.mnemonic == "bl")
            if i4 < 0:
                continue
            i5 = _find_after(
                insns, i4 + 1, lambda x: x.mnemonic == "mov" and x.op_str == "x20, x0"
            )
            if i5 < 0:
                continue
            i6 = _find_after(
                insns,
                i5 + 1,
                lambda x: x.mnemonic == "cbnz" and x.op_str.startswith("w0,"),
            )
            if i6 < 0:
                continue
            i7 = _find_after(
                insns,
                i6 + 1,
                lambda x: x.mnemonic == "cbz" and x.op_str.startswith("x19,"),
            )
            if i7 < 0:
                continue
            hits.append(off)
        # A unique match is mandatory — patching the wrong function here
        # would corrupt unrelated AMFI code.
        if len(hits) != 1:
            self._log(f" [-] expected 1 AMFI trustcache body hit, found {len(hits)}")
            return False
        func_start = hits[0]
        self.emit(func_start, MOV_X0_1, "mov x0,#1 [AMFIIsCDHashInTrustCache]")
        self.emit(func_start + 4, CBZ_X2_8, "cbz x2,+8 [AMFIIsCDHashInTrustCache]")
        self.emit(func_start + 8, STR_X0_X2, "str x0,[x2] [AMFIIsCDHashInTrustCache]")
        self.emit(func_start + 12, RET, "ret [AMFIIsCDHashInTrustCache]")
        return True

View File

@@ -0,0 +1,69 @@
"""Mixin: KernelJBPatchBsdInitAuthMixin."""
from .kernel_jb_base import MOV_X0_0
class KernelJBPatchBsdInitAuthMixin:
    """Mixin: bypass the rootvp authentication check in _bsd_init."""

    def patch_bsd_init_auth(self):
        """Bypass rootvp authentication check in _bsd_init.

        Pattern: ldr x0, [xN, #0x2b8]; cbz x0, ...; bl AUTH_FUNC
        Replace the BL with mov x0, #0 so the auth call always "succeeds".

        Tries the _bsd_init symbol first; falls back to scanning kernel
        text for the three-instruction pattern and patching the last
        candidate (bsd_init runs late in boot, so its site sits late in
        the text section).
        """
        self._log("\n[JB] _bsd_init: mov x0,#0 (auth bypass)")
        # Try symbol first
        foff = self._resolve_symbol("_bsd_init")
        if foff >= 0:
            func_end = self._find_func_end(foff, 0x2000)
            result = self._find_auth_bl(foff, func_end)
            if result:
                self.emit(result, MOV_X0_0, "mov x0,#0 [_bsd_init auth]")
                return True
        # Pattern search: ldr x0, [xN, #0x2b8]; cbz x0; bl
        ks, ke = self.kern_text
        candidates = []
        for off in range(ks, ke - 8, 4):
            d = self._disas_at(off, 3)
            if len(d) < 3:
                continue
            i0, i1, i2 = d[0], d[1], d[2]
            if i0.mnemonic != "ldr" or i1.mnemonic != "cbz" or i2.mnemonic != "bl":
                continue
            if not i0.op_str.startswith("x0,"):
                continue
            # #0x2b8 is the rootvp field offset the auth check loads from.
            if "#0x2b8" not in i0.op_str:
                continue
            if not i1.op_str.startswith("x0,"):
                continue
            candidates.append(off + 8)  # the BL offset
        if not candidates:
            self._log(" [-] ldr+cbz+bl pattern not found")
            return False
        # Filter to kern_text range (exclude kexts)
        kern_candidates = [c for c in candidates if ks <= c < ke]
        if not kern_candidates:
            kern_candidates = candidates
        # Pick the last one in the kernel (bsd_init is typically late in boot)
        bl_off = kern_candidates[-1]
        self._log(
            f" [+] auth BL at 0x{bl_off:X} ({len(kern_candidates)} kern candidates)"
        )
        self.emit(bl_off, MOV_X0_0, "mov x0,#0 [_bsd_init auth]")
        return True

    def _find_auth_bl(self, start, end):
        """Find ldr x0,[xN,#0x2b8]; cbz x0; bl pattern. Returns BL offset."""
        # Same pattern as the fallback scan above, but bounded to one
        # function and returning the first match.
        for off in range(start, end - 8, 4):
            d = self._disas_at(off, 3)
            if len(d) < 3:
                continue
            i0, i1, i2 = d[0], d[1], d[2]
            if i0.mnemonic == "ldr" and i1.mnemonic == "cbz" and i2.mnemonic == "bl":
                if i0.op_str.startswith("x0,") and "#0x2b8" in i0.op_str:
                    if i1.op_str.startswith("x0,"):
                        return off + 8
        return None

View File

@@ -0,0 +1,133 @@
"""Mixin: KernelJBPatchCredLabelMixin."""
from .kernel_jb_base import asm, _rd32, RET
class KernelJBPatchCredLabelMixin:
    """Mixin: replace _cred_label_update_execve with cs_flags shellcode."""

    def patch_cred_label_update_execve(self):
        """Redirect _cred_label_update_execve to shellcode that sets cs_flags.

        Shellcode: LDR x0,[sp,#8]; LDR w1,[x0]; ORR w1,w1,#0x4000000;
        ORR w1,w1,#0xF; AND w1,w1,#0xFFFFC0FF; STR w1,[x0];
        MOV x0,xzr; RETAB

        Locates the target via symbol → string-anchor heuristics, writes
        the shellcode into a code cave, and replaces the function's final
        return with a branch into the cave.
        """
        self._log("\n[JB] _cred_label_update_execve: shellcode (cs_flags)")
        # Find the function via AMFI string reference
        func_off = -1
        # Try symbol
        for sym, off in self.symbols.items():
            if "cred_label_update_execve" in sym and "hook" not in sym:
                func_off = off
                break
        if func_off < 0:
            # String anchor: the function is near execve-related AMFI code.
            # Look for the function that contains the AMFI string ref and
            # then find _cred_label_update_execve through BL targets.
            # NOTE(review): this branch currently computes `caller` and does
            # nothing with it (ends in `pass`) — dead code; the branch below
            # does the actual fallback lookup. Confirm before removing.
            str_off = self.find_string(b"AMFI: code signature validation failed")
            if str_off >= 0:
                refs = self.find_string_refs(str_off, *self.amfi_text)
                if refs:
                    caller = self.find_function_start(refs[0][0])
                    if caller >= 0:
                        # Walk through the AMFI text section to find functions
                        # that have a RETAB at the end and take many arguments
                        # The _cred_label_update_execve has many args and a
                        # distinctive prologue.
                        pass
        if func_off < 0:
            # Alternative: search AMFI text for functions that match the pattern
            # of _cred_label_update_execve (long prologue, many saved regs, RETAB)
            # Look for the specific pattern: mov xN, x2 in early prologue
            # (saves the vnode arg) followed by stp xzr,xzr pattern
            s, e = self.amfi_text
            # Search for PACIBSP functions in AMFI that are BL targets from
            # the execve kill path area
            str_off = self.find_string(b"AMFI: hook..execve() killing")
            if str_off < 0:
                str_off = self.find_string(b"execve() killing")
            if str_off >= 0:
                refs = self.find_string_refs(str_off, s, e)
                if not refs:
                    refs = self.find_string_refs(str_off)
                if refs:
                    kill_func = self.find_function_start(refs[0][0])
                    if kill_func >= 0:
                        kill_end = self._find_func_end(kill_func, 0x800)
                        # The kill function ends with RETAB. The next function
                        # after it should be close to _cred_label_update_execve.
                        # Actually, _cred_label_update_execve is typically the
                        # function BEFORE the kill function.
                        # Search backward from kill_func for a RETAB/RET
                        for back in range(kill_func - 4, max(kill_func - 0x400, s), -4):
                            val = _rd32(self.raw, back)
                            # 0xD65F0FFF/0xD65F0BFF = RETAB/RETAA, 0xD65F03C0 = RET.
                            if val in (0xD65F0FFF, 0xD65F0BFF, 0xD65F03C0):
                                # Found end of previous function.
                                # The function we want starts at the next PACIBSP before back.
                                for scan in range(back - 4, max(back - 0x400, s), -4):
                                    d = self._disas_at(scan)
                                    if d and d[0].mnemonic == "pacibsp":
                                        func_off = scan
                                        break
                                break
        if func_off < 0:
            self._log(" [-] function not found, skipping shellcode patch")
            return False
        # Find code cave
        cave = self._find_code_cave(32)  # 8 instructions = 32 bytes
        if cave < 0:
            self._log(" [-] no code cave found for shellcode")
            return False
        # Assemble shellcode
        shellcode = (
            asm("ldr x0, [sp, #8]")  # load cred pointer
            + asm("ldr w1, [x0]")  # load cs_flags
            + asm("orr w1, w1, #0x4000000")  # set CS_PLATFORM_BINARY
            + asm(
                "orr w1, w1, #0xF"
            )  # set CS_VALID|CS_ADHOC|CS_GET_TASK_ALLOW|CS_INSTALLER
            + bytes(
                [0x21, 0x64, 0x12, 0x12]
            )  # AND w1, w1, #0xFFFFC0FF (clear CS_HARD|CS_KILL etc)
            + asm("str w1, [x0]")  # store back
            + asm("mov x0, xzr")  # return 0
            + bytes([0xFF, 0x0F, 0x5F, 0xD6])  # RETAB
        )
        # Find the return site in the function (last RETAB)
        func_end = self._find_func_end(func_off, 0x200)
        ret_off = -1
        for off in range(func_end - 4, func_off, -4):
            val = _rd32(self.raw, off)
            if val in (0xD65F0FFF, 0xD65F0BFF, 0xD65F03C0):
                ret_off = off
                break
        if ret_off < 0:
            self._log(" [-] function return not found")
            return False
        # Write shellcode to cave
        for i in range(0, len(shellcode), 4):
            self.emit(
                cave + i,
                shellcode[i : i + 4],
                f"shellcode+{i} [_cred_label_update_execve]",
            )
        # Branch from function return to cave
        b_bytes = self._encode_b(ret_off, cave)
        if b_bytes:
            self.emit(
                ret_off, b_bytes, f"b cave [_cred_label_update_execve -> 0x{cave:X}]"
            )
        else:
            self._log(" [-] branch to cave out of range")
            return False
        return True

View File

@@ -0,0 +1,81 @@
"""Mixin: KernelJBPatchDounmountMixin."""
from .kernel_jb_base import NOP
class KernelJBPatchDounmountMixin:
    """Mixin: neutralize the MAC policy check inside _dounmount."""

    def patch_dounmount(self):
        """NOP a MAC check in _dounmount.

        Pattern: mov w1,#0; mov x2,#0; bl TARGET (MAC policy check pattern).
        Resolution order: symbol → "dounmount:" string anchor (following
        BL targets of the referencing function) → brute scan of kernel
        text for a short function containing the pattern.
        """
        self._log("\n[JB] _dounmount: NOP")
        # Try symbol first
        foff = self._resolve_symbol("_dounmount")
        if foff >= 0:
            func_end = self._find_func_end(foff, 0x1000)
            result = self._find_mac_check_bl(foff, func_end)
            if result:
                self.emit(result, NOP, "NOP [_dounmount MAC check]")
                return True
        # String anchor: "dounmount:" → find function → search BL targets
        # for the actual _dounmount with MAC check
        str_off = self.find_string(b"dounmount:")
        if str_off >= 0:
            refs = self.find_string_refs(str_off)
            for adrp_off, _, _ in refs:
                caller = self.find_function_start(adrp_off)
                if caller < 0:
                    continue
                caller_end = self._find_func_end(caller, 0x2000)
                # Check BL targets from this function
                for off in range(caller, caller_end, 4):
                    target = self._is_bl(off)
                    if target < 0 or not (
                        self.kern_text[0] <= target < self.kern_text[1]
                    ):
                        continue
                    te = self._find_func_end(target, 0x400)
                    result = self._find_mac_check_bl(target, te)
                    if result:
                        self.emit(result, NOP, "NOP [_dounmount MAC check]")
                        return True
        # Broader: scan kern_text for short functions with MAC check pattern
        ks, ke = self.kern_text
        for off in range(ks, ke - 12, 4):
            d = self._disas_at(off)
            if not d or d[0].mnemonic != "pacibsp":
                continue
            func_end = self._find_func_end(off, 0x400)
            # Only small functions qualify — limits false positives.
            if func_end - off > 0x400:
                continue
            result = self._find_mac_check_bl(off, func_end)
            if result:
                # Verify: function should have "unmount" context
                # (contain a BL to a function also called from known mount code)
                self.emit(result, NOP, "NOP [_dounmount MAC check]")
                return True
        self._log(" [-] patch site not found")
        return False

    def _find_mac_check_bl(self, start, end):
        """Find mov w1,#0; mov x2,#0; bl TARGET pattern. Returns BL offset or None."""
        for off in range(start, end - 8, 4):
            d = self._disas_at(off, 3)
            if len(d) < 3:
                continue
            i0, i1, i2 = d[0], d[1], d[2]
            if i0.mnemonic != "mov" or i1.mnemonic != "mov" or i2.mnemonic != "bl":
                continue
            # Check: mov w1, #0; mov x2, #0
            if "w1" in i0.op_str and "#0" in i0.op_str:
                if "x2" in i1.op_str and "#0" in i1.op_str:
                    return off + 8
            # Also match: mov x2, #0; mov w1, #0
            if "x2" in i0.op_str and "#0" in i0.op_str:
                if "w1" in i1.op_str and "#0" in i1.op_str:
                    return off + 8
        return None

View File

@@ -0,0 +1,173 @@
"""Mixin: KernelJBPatchHookCredLabelMixin."""
from .kernel_jb_base import asm, _rd32, _rd64, RET, NOP, struct
class KernelJBPatchHookCredLabelMixin:
    """Mixin: hook the sandbox cred_label_update_execve MAC ops entry."""

    def patch_hook_cred_label_update_execve(self):
        """Redirect _hook_cred_label_update_execve ops table entry to shellcode.

        Patches the sandbox MAC ops table entry for cred_label_update_execve
        to point to custom shellcode that performs vnode_getattr ownership
        propagation. Instead of calling vfs_context_current (which may not
        exist as a BL-callable function), we construct a vfs_context on the
        stack using current_thread (mrs tpidr_el1) and the caller's
        credential (x0 = old_cred).

        The shellcode tail-branches to the original hook, so sandbox
        behavior is preserved after the ownership fix-up runs.
        """
        self._log("\n[JB] _hook_cred_label_update_execve: ops table + shellcode")
        # ── 1. Find vnode_getattr via string anchor ──────────────
        vnode_getattr_off = self._resolve_symbol("_vnode_getattr")
        if vnode_getattr_off < 0:
            str_off = self.find_string(b"vnode_getattr")
            if str_off >= 0:
                refs = self.find_string_refs(str_off)
                if refs:
                    vnode_getattr_off = self.find_function_start(refs[0][0])
                    if vnode_getattr_off >= 0:
                        self._log(
                            f" [+] vnode_getattr at 0x"
                            f"{vnode_getattr_off:X} (via string)"
                        )
        if vnode_getattr_off < 0:
            self._log(" [-] vnode_getattr not found")
            return False
        # ── 2. Find sandbox ops table ────────────────────────────
        ops_table = self._find_sandbox_ops_table_via_conf()
        if ops_table is None:
            self._log(" [-] sandbox ops table not found")
            return False
        # ── 3. Find hook index dynamically ───────────────────────
        # mpo_cred_label_update_execve is one of the largest sandbox
        # hooks at an early index (< 30). Scan for it.
        hook_index = -1
        orig_hook = -1
        best_size = 0
        for idx in range(0, 30):
            entry = self._read_ops_entry(ops_table, idx)
            if entry is None or entry <= 0:
                continue
            if not any(s <= entry < e for s, e in self.code_ranges):
                continue
            fend = self._find_func_end(entry, 0x2000)
            fsize = fend - entry
            if fsize > best_size:
                best_size = fsize
                hook_index = idx
                orig_hook = entry
        # best_size < 1000 means no entry was large enough to be the hook.
        if hook_index < 0 or best_size < 1000:
            self._log(
                " [-] hook entry not found in ops table "
                f"(best: idx={hook_index}, size={best_size})"
            )
            return False
        self._log(
            f" [+] hook at ops[{hook_index}] = 0x{orig_hook:X} ({best_size} bytes)"
        )
        # ── 4. Find code cave ────────────────────────────────────
        cave = self._find_code_cave(180)
        if cave < 0:
            self._log(" [-] no code cave found")
            return False
        self._log(f" [+] code cave at 0x{cave:X}")
        # ── 5. Encode BL to vnode_getattr ────────────────────────
        # Must match the position of part index 17 in the list below.
        vnode_bl_off = cave + 17 * 4
        vnode_bl = self._encode_bl(vnode_bl_off, vnode_getattr_off)
        if not vnode_bl:
            self._log(" [-] BL to vnode_getattr out of range")
            return False
        # ── 6. Encode B to original hook ─────────────────────────
        # Must match the position of part index 44 in the list below.
        b_back_off = cave + 44 * 4
        b_back = self._encode_b(b_back_off, orig_hook)
        if not b_back:
            self._log(" [-] B to original hook out of range")
            return False
        # ── 7. Build shellcode ───────────────────────────────────
        # MAC hook args: x0=old_cred, x1=new_cred, x2=proc, x3=vp
        #
        # Parts [8-10] construct a vfs_context on the stack instead
        # of calling vfs_context_current, which may not exist as a
        # direct BL target in stripped ARM64e kernels.
        #
        # struct vfs_context { thread_t vc_thread; kauth_cred_t vc_ucred; }
        # We place it at [sp, #0x70] (between saved regs and vattr buffer).
        parts = []
        parts.append(NOP)  # 0
        parts.append(asm("cbz x3, #0xa8"))  # 1
        parts.append(asm("sub sp, sp, #0x400"))  # 2
        parts.append(asm("stp x29, x30, [sp]"))  # 3
        parts.append(asm("stp x0, x1, [sp, #16]"))  # 4
        parts.append(asm("stp x2, x3, [sp, #32]"))  # 5
        parts.append(asm("stp x4, x5, [sp, #48]"))  # 6
        parts.append(asm("stp x6, x7, [sp, #64]"))  # 7
        # Construct vfs_context inline (replaces BL vfs_context_current)
        parts.append(asm("mrs x8, tpidr_el1"))  # 8: current_thread
        parts.append(asm("stp x8, x0, [sp, #0x70]"))  # 9: {thread, cred}
        parts.append(asm("add x2, sp, #0x70"))  # 10: ctx = &vfs_ctx
        # Setup vnode_getattr(vp, &vattr, ctx)
        parts.append(asm("ldr x0, [sp, #0x28]"))  # 11: x0 = vp
        parts.append(asm("add x1, sp, #0x80"))  # 12: x1 = &vattr
        parts.append(asm("mov w8, #0x380"))  # 13: vattr size
        parts.append(asm("stp xzr, x8, [x1]"))  # 14: init vattr
        parts.append(asm("stp xzr, xzr, [x1, #0x10]"))  # 15: init vattr
        parts.append(NOP)  # 16
        parts.append(vnode_bl)  # 17: BL vnode_getattr
        # Check result + propagate ownership
        parts.append(asm("cbnz x0, #0x50"))  # 18: error → skip
        parts.append(asm("mov w2, #0"))  # 19: changed = 0
        parts.append(asm("ldr w8, [sp, #0xCC]"))  # 20: va_mode
        parts.append(bytes([0xA8, 0x00, 0x58, 0x36]))  # 21: tbz w8,#11
        parts.append(asm("ldr w8, [sp, #0xC4]"))  # 22: va_uid
        parts.append(asm("ldr x0, [sp, #0x18]"))  # 23: new_cred
        parts.append(asm("str w8, [x0, #0x18]"))  # 24: cred->uid
        parts.append(asm("mov w2, #1"))  # 25: changed = 1
        parts.append(asm("ldr w8, [sp, #0xCC]"))  # 26: va_mode
        parts.append(bytes([0xA8, 0x00, 0x50, 0x36]))  # 27: tbz w8,#10
        parts.append(asm("mov w2, #1"))  # 28: changed = 1
        parts.append(asm("ldr w8, [sp, #0xC8]"))  # 29: va_gid
        parts.append(asm("ldr x0, [sp, #0x18]"))  # 30: new_cred
        parts.append(asm("str w8, [x0, #0x28]"))  # 31: cred->gid
        parts.append(asm("cbz w2, #0x1c"))  # 32: if !changed
        parts.append(asm("ldr x0, [sp, #0x20]"))  # 33: proc
        parts.append(asm("ldr w8, [x0, #0x454]"))  # 34: p_csflags
        parts.append(asm("orr w8, w8, #0x100"))  # 35: CS_VALID
        parts.append(asm("str w8, [x0, #0x454]"))  # 36: store
        parts.append(asm("ldp x0, x1, [sp, #16]"))  # 37: restore
        parts.append(asm("ldp x2, x3, [sp, #32]"))  # 38
        parts.append(asm("ldp x4, x5, [sp, #48]"))  # 39
        parts.append(asm("ldp x6, x7, [sp, #64]"))  # 40
        parts.append(asm("ldp x29, x30, [sp]"))  # 41
        parts.append(asm("add sp, sp, #0x400"))  # 42
        parts.append(NOP)  # 43
        parts.append(b_back)  # 44: B orig_hook
        for i, part in enumerate(parts):
            self.emit(
                cave + i * 4,
                part,
                f"shellcode+{i * 4} [_hook_cred_label_update_execve]",
            )
        # ── 8. Rewrite ops table entry ───────────────────────────
        # Preserve auth rebase upper 32 bits (PAC key, diversity,
        # chain next) and replace lower 32 bits with cave foff.
        entry_off = ops_table + hook_index * 8
        orig_raw = _rd64(self.raw, entry_off)
        new_raw = (orig_raw & 0xFFFFFFFF00000000) | (cave & 0xFFFFFFFF)
        self.emit(
            entry_off,
            struct.pack("<Q", new_raw),
            f"ops_table[{hook_index}] = cave 0x{cave:X} "
            f"[_hook_cred_label_update_execve]",
        )
        return True

View File

@@ -0,0 +1,137 @@
"""Mixin: KernelJBPatchKcall10Mixin."""
from .kernel_jb_base import asm, _rd32, _rd64, RET, NOP, struct
class KernelJBPatchKcall10Mixin:
    """Mixin: install a kernel-call primitive via syscall 439."""

    def patch_kcall10(self):
        """Replace SYS_kas_info (syscall 439) with kcall10 shellcode.

        Anchor: find _nosys function by pattern, then search DATA segments
        for the sysent table (first entry points to _nosys).

        The shellcode reads a 10-argument frame from userspace-provided
        memory at [sp,#0x40], calls the supplied function pointer via
        BLR x16, and writes the (possibly clobbered) registers back.
        """
        self._log("\n[JB] kcall10: syscall 439 replacement")
        # Find _nosys
        nosys_off = self._resolve_symbol("_nosys")
        if nosys_off < 0:
            nosys_off = self._find_nosys()
        if nosys_off < 0:
            self._log(" [-] _nosys not found")
            return False
        self._log(f" [+] _nosys at 0x{nosys_off:X}")
        # Find _munge_wwwwwwww
        munge_off = self._resolve_symbol("_munge_wwwwwwww")
        if munge_off < 0:
            for sym, off in self.symbols.items():
                if "munge_wwwwwwww" in sym:
                    munge_off = off
                    break
        # Search for sysent table in DATA segments
        sysent_off = -1
        for seg_name, vmaddr, fileoff, filesize, _ in self.all_segments:
            if "DATA" not in seg_name:
                continue
            for off in range(fileoff, fileoff + filesize - 24, 8):
                val = _rd64(self.raw, off)
                decoded = self._decode_chained_ptr(val)
                if decoded == nosys_off:
                    # Verify: sysent[1] should also point to valid code
                    # (sysent entries are 24 bytes apart).
                    val2 = _rd64(self.raw, off + 24)
                    decoded2 = self._decode_chained_ptr(val2)
                    if decoded2 > 0 and any(
                        s <= decoded2 < e for s, e in self.code_ranges
                    ):
                        sysent_off = off
                        break
            if sysent_off >= 0:
                break
        if sysent_off < 0:
            self._log(" [-] sysent table not found")
            return False
        self._log(f" [+] sysent table at file offset 0x{sysent_off:X}")
        # Entry 439 (SYS_kas_info)
        entry_439 = sysent_off + 439 * 24
        # Find code cave for kcall10 shellcode (~128 bytes = 32 instructions)
        cave = self._find_code_cave(128)
        if cave < 0:
            self._log(" [-] no code cave found")
            return False
        # Build kcall10 shellcode
        parts = [
            asm("ldr x10, [sp, #0x40]"),  # 0
            asm("ldp x0, x1, [x10, #0]"),  # 1
            asm("ldp x2, x3, [x10, #0x10]"),  # 2
            asm("ldp x4, x5, [x10, #0x20]"),  # 3
            asm("ldp x6, x7, [x10, #0x30]"),  # 4
            asm("ldp x8, x9, [x10, #0x40]"),  # 5
            asm("ldr x10, [x10, #0x50]"),  # 6
            # Shift args down: x16 = target fn, x0..x9 = its arguments.
            asm("mov x16, x0"),  # 7
            asm("mov x0, x1"),  # 8
            asm("mov x1, x2"),  # 9
            asm("mov x2, x3"),  # 10
            asm("mov x3, x4"),  # 11
            asm("mov x4, x5"),  # 12
            asm("mov x5, x6"),  # 13
            asm("mov x6, x7"),  # 14
            asm("mov x7, x8"),  # 15
            asm("mov x8, x9"),  # 16
            asm("mov x9, x10"),  # 17
            asm("stp x29, x30, [sp, #-0x10]!"),  # 18
            bytes([0x00, 0x02, 0x3F, 0xD6]),  # 19: BLR x16
            asm("ldp x29, x30, [sp], #0x10"),  # 20
            asm("ldr x11, [sp, #0x40]"),  # 21
            NOP,  # 22
            # Write results back to the argument frame.
            asm("stp x0, x1, [x11, #0]"),  # 23
            asm("stp x2, x3, [x11, #0x10]"),  # 24
            asm("stp x4, x5, [x11, #0x20]"),  # 25
            asm("stp x6, x7, [x11, #0x30]"),  # 26
            asm("stp x8, x9, [x11, #0x40]"),  # 27
            asm("str x10, [x11, #0x50]"),  # 28
            asm("mov x0, #0"),  # 29
            asm("ret"),  # 30
            NOP,  # 31
        ]
        for i, part in enumerate(parts):
            self.emit(cave + i * 4, part, f"shellcode+{i * 4} [kcall10]")
        # Patch sysent[439]
        # NOTE(review): the sy_call/sy_munge pointers are written as plain
        # VAs, unlike the ops-table patch elsewhere which preserves
        # chained-fixup upper bits — confirm sysent slots are not
        # chained-fixup encoded in this kernelcache.
        cave_va = self.base_va + cave
        self.emit(
            entry_439,
            struct.pack("<Q", cave_va),
            f"sysent[439].sy_call = 0x{cave_va:X} [kcall10]",
        )
        if munge_off >= 0:
            munge_va = self.base_va + munge_off
            self.emit(
                entry_439 + 8,
                struct.pack("<Q", munge_va),
                f"sysent[439].sy_munge32 = 0x{munge_va:X} [kcall10]",
            )
        # sy_return_type = SYSCALL_RET_UINT64_T (7)
        self.emit(
            entry_439 + 16,
            struct.pack("<I", 7),
            "sysent[439].sy_return_type = 7 [kcall10]",
        )
        # sy_narg = 8, sy_arg_bytes = 0x20
        self.emit(
            entry_439 + 20,
            struct.pack("<I", 0x200008),
            "sysent[439].sy_narg=8,sy_arg_bytes=0x20 [kcall10]",
        )
        return True

View File

@@ -0,0 +1,106 @@
"""Mixin: KernelJBPatchLoadDylinkerMixin."""
from .kernel_jb_base import NOP
class KernelJBPatchLoadDylinkerMixin:
    """Mixin: bypass PAC re-signing in the chained-fixup rebase path."""

    def patch_load_dylinker(self):
        """Bypass PAC auth check in Mach-O chained fixup rebase code.

        The kernel's chained fixup pointer rebase function contains PAC
        authentication triplets: TST xN, #high; B.EQ skip; MOVK xN, #0xc8a2.
        This function has 3+ such triplets and 0 BL callers (indirect call).
        Find the function and replace the LAST TST with an unconditional
        branch to the B.EQ target (always skip PAC re-signing).
        """
        self._log("\n[JB] _load_dylinker: PAC rebase bypass")
        # Try symbol first
        foff = self._resolve_symbol("_load_dylinker")
        if foff >= 0:
            func_end = self._find_func_end(foff, 0x2000)
            result = self._find_tst_pac_triplet(foff, func_end)
            if result:
                tst_off, beq_target = result
                b_bytes = self._encode_b(tst_off, beq_target)
                if b_bytes:
                    self.emit(
                        tst_off,
                        b_bytes,
                        f"b #0x{beq_target - tst_off:X} [_load_dylinker]",
                    )
                    return True
        # Pattern search: find functions with 3+ TST+B.EQ+MOVK(#0xc8a2)
        # triplets and 0 BL callers. This is the chained fixup rebase code.
        ks, ke = self.kern_text
        off = ks
        while off < ke - 4:
            d = self._disas_at(off)
            if not d or d[0].mnemonic != "pacibsp":
                off += 4
                continue
            func_start = off
            func_end = self._find_func_end(func_start, 0x2000)
            # Must have 0 BL callers (indirect call via function pointer)
            if self.bl_callers.get(func_start, []):
                off = func_end
                continue
            # Count TST+B.EQ+MOVK(#0xc8a2) triplets
            triplets = []
            for o in range(func_start, func_end - 8, 4):
                d3 = self._disas_at(o, 3)
                if len(d3) < 3:
                    continue
                i0, i1, i2 = d3[0], d3[1], d3[2]
                if (
                    i0.mnemonic == "tst"
                    and "40000000000000" in i0.op_str
                    and i1.mnemonic == "b.eq"
                    and i2.mnemonic == "movk"
                    and "#0xc8a2" in i2.op_str
                ):
                    beq_target = i1.operands[-1].imm
                    triplets.append((o, beq_target))
            if len(triplets) >= 3:
                # Patch the last triplet (deepest in the function)
                tst_off, beq_target = triplets[-1]
                b_bytes = self._encode_b(tst_off, beq_target)
                if b_bytes:
                    self._log(
                        f" [+] rebase func at 0x{func_start:X}, "
                        f"patch TST at 0x{tst_off:X}"
                    )
                    self.emit(
                        tst_off,
                        b_bytes,
                        f"b #0x{beq_target - tst_off:X} [_load_dylinker PAC bypass]",
                    )
                    return True
            off = func_end
        self._log(" [-] PAC rebase function not found")
        return False

    def _find_tst_pac_triplet(self, start, end):
        """Find last TST+B.EQ+MOVK(#0xc8a2) triplet. Returns (tst_off, beq_target)."""
        last = None
        for off in range(start, end - 8, 4):
            d = self._disas_at(off, 3)
            if len(d) < 3:
                continue
            i0, i1, i2 = d[0], d[1], d[2]
            if (
                i0.mnemonic == "tst"
                and "40000000000000" in i0.op_str
                and i1.mnemonic == "b.eq"
                and i2.mnemonic == "movk"
                and "#0xc8a2" in i2.op_str
            ):
                last = (off, i1.operands[-1].imm)
        return last

View File

@@ -0,0 +1,86 @@
"""Mixin: KernelJBPatchMacMountMixin."""
from .kernel_jb_base import NOP, MOV_X8_XZR
class KernelJBPatchMacMountMixin:
    """Mixin: defeat the MAC policy check inside ___mac_mount."""

    def patch_mac_mount(self):
        """Bypass MAC mount check: NOP + mov x8,xzr in ___mac_mount.

        Anchor: 'mount_common()' string → find nearby ___mac_mount function.
        NOPs the first BL whose result guards a CBNZ w0, then forces the
        subsequent mov into x8 (the propagated error value) to zero.
        """
        self._log("\n[JB] ___mac_mount: NOP + mov x8,xzr")
        # Try symbol first
        foff = self._resolve_symbol("___mac_mount")
        if foff < 0:
            foff = self._resolve_symbol("__mac_mount")
        if foff < 0:
            # Find via 'mount_common()' string → function area
            # ___mac_mount is typically called from mount_common/kernel_mount
            # Search for a function containing a BL+CBNZ w0 pattern
            # near the mount_common string reference area
            str_off = self.find_string(b"mount_common()")
            if str_off >= 0:
                refs = self.find_string_refs(str_off, *self.kern_text)
                if refs:
                    mount_common_func = self.find_function_start(refs[0][0])
                    if mount_common_func >= 0:
                        # __mac_mount is called from mount_common
                        # Find BL targets from mount_common
                        mc_end = self._find_func_end(mount_common_func, 0x2000)
                        for off in range(mount_common_func, mc_end, 4):
                            target = self._is_bl(off)
                            if (
                                target >= 0
                                and self.kern_text[0] <= target < self.kern_text[1]
                            ):
                                # Check if this target contains BL+CBNZ w0 pattern
                                # (mac check) followed by a mov to x8
                                te = self._find_func_end(target, 0x1000)
                                for off2 in range(target, te - 8, 4):
                                    d0 = self._disas_at(off2)
                                    if not d0 or d0[0].mnemonic != "bl":
                                        continue
                                    d1 = self._disas_at(off2 + 4)
                                    if (
                                        d1
                                        and d1[0].mnemonic == "cbnz"
                                        and d1[0].op_str.startswith("w0,")
                                    ):
                                        foff = target
                                        break
                                if foff >= 0:
                                    break
        if foff < 0:
            self._log(" [-] function not found")
            return False
        func_end = self._find_func_end(foff, 0x1000)
        patched = 0
        for off in range(foff, func_end - 8, 4):
            d0 = self._disas_at(off)
            if not d0 or d0[0].mnemonic != "bl":
                continue
            d1 = self._disas_at(off + 4)
            if not d1:
                continue
            if d1[0].mnemonic == "cbnz" and d1[0].op_str.startswith("w0,"):
                self.emit(off, NOP, "NOP [___mac_mount BL check]")
                patched += 1
                # Zero the error value propagated through x8 shortly after
                # the (now-NOPed) check.
                for off2 in range(off + 8, min(off + 0x60, func_end), 4):
                    d2 = self._disas_at(off2)
                    if not d2:
                        continue
                    if d2[0].mnemonic == "mov" and "x8" in d2[0].op_str:
                        if d2[0].op_str != "x8, xzr":
                            self.emit(off2, MOV_X8_XZR, "mov x8,xzr [___mac_mount]")
                            patched += 1
                        break
                break
        if patched == 0:
            self._log(" [-] patch sites not found")
            return False
        return True

View File

@@ -0,0 +1,73 @@
"""Mixin: KernelJBPatchNvramMixin."""
from .kernel_jb_base import NOP
class KernelJBPatchNvramMixin:
    """Mixin: remove the permission guard in NVRAM verifyPermission."""

    def patch_nvram_verify_permission(self):
        """NOP verification in IONVRAMController's verifyPermission.

        Anchor: 'krn.' string (NVRAM key prefix) → xref → function → TBZ/TBNZ.
        Resolution order: mangled symbol → any symbol containing both
        "verifyPermission" and "NVRAM" → 'krn.' string xref → NVRAM
        write-access entitlement string xref. The first TBZ/TBNZ guard
        found (searching backward from the 'krn.' ref, then forward over
        the whole function) is NOPed.
        """
        self._log("\n[JB] verifyPermission (NVRAM): NOP")
        # Try symbol first
        sym_off = self._resolve_symbol(
            "__ZL16verifyPermission16IONVRAMOperationPKhPKcb"
        )
        if sym_off < 0:
            for sym, off in self.symbols.items():
                if "verifyPermission" in sym and "NVRAM" in sym:
                    sym_off = off
                    break
        # String anchor: "krn." is referenced in verifyPermission.
        # The TBZ/TBNZ guard is immediately before the ADRP+ADD that
        # loads the "krn." string, so search backward from that ref.
        str_off = self.find_string(b"krn.")
        ref_off = -1
        if str_off >= 0:
            refs = self.find_string_refs(str_off)
            if refs:
                ref_off = refs[0][0]  # ADRP instruction offset
        foff = (
            sym_off
            if sym_off >= 0
            else (self.find_function_start(ref_off) if ref_off >= 0 else -1)
        )
        if foff < 0:
            # Fallback: try NVRAM entitlement string
            ent_off = self.find_string(b"com.apple.private.iokit.nvram-write-access")
            if ent_off >= 0:
                ent_refs = self.find_string_refs(ent_off)
                if ent_refs:
                    foff = self.find_function_start(ent_refs[0][0])
        if foff < 0:
            self._log(" [-] function not found")
            return False
        func_end = self._find_func_end(foff, 0x600)
        # Strategy 1: search backward from "krn." string ref for
        # nearest TBZ/TBNZ — the guard branch is typically within
        # a few instructions before the ADRP that loads "krn.".
        if ref_off > foff:
            for off in range(ref_off - 4, max(foff - 4, ref_off - 0x20), -4):
                d = self._disas_at(off)
                if d and d[0].mnemonic in ("tbnz", "tbz"):
                    self.emit(off, NOP, "NOP [verifyPermission NVRAM]")
                    return True
        # Strategy 2: scan full function for first TBZ/TBNZ
        for off in range(foff, func_end, 4):
            d = self._disas_at(off)
            if not d:
                continue
            if d[0].mnemonic in ("tbnz", "tbz"):
                self.emit(off, NOP, "NOP [verifyPermission NVRAM]")
                return True
        self._log(" [-] TBZ/TBNZ not found in function")
        return False

View File

@@ -0,0 +1,45 @@
"""Mixin: KernelJBPatchPortToMapMixin."""
from .kernel_jb_base import ARM64_OP_IMM
class KernelJBPatchPortToMapMixin:
    """Mixin that defuses the kernel-map panic in port→map conversion."""

    def patch_convert_port_to_map(self):
        """Skip panic in _convert_port_to_map_with_flavor.

        Anchor: 'userspace has control access to a kernel map' panic string.
        For each code reference to the panic string, locate the BL to
        panic just after it, then retarget the guard branch (found by
        scanning backward) that jumps into the error path so it lands
        immediately past the panic call instead.
        """
        self._log("\n[JB] _convert_port_to_map_with_flavor: skip panic")
        panic_str = self.find_string(b"userspace has control access to a kernel map")
        if panic_str < 0:
            self._log(" [-] panic string not found")
            return False
        xrefs = self.find_string_refs(panic_str, *self.kern_text)
        if not xrefs:
            self._log(" [-] no code refs")
            return False
        for adrp_at, add_at, _ in xrefs:
            panic_bl = self._find_bl_to_panic_in_range(
                add_at, min(add_at + 0x40, self.size)
            )
            if panic_bl < 0:
                continue
            continue_at = panic_bl + 4
            window_lo = adrp_at - 0x40
            scan = adrp_at - 4
            floor = max(adrp_at - 0x200, 0)
            # Walk backward looking for a branch into the error path.
            while scan > floor:
                dest, kind = self._decode_branch_target(scan)
                hits_error_path = (
                    dest is not None and window_lo <= dest <= panic_bl + 4
                )
                if hits_error_path:
                    patch = self._encode_b(scan, continue_at)
                    if patch:
                        self.emit(
                            scan,
                            patch,
                            f"b #0x{continue_at - scan:X} "
                            f"[_convert_port_to_map skip panic]",
                        )
                        return True
                scan -= 4
        self._log(" [-] branch site not found")
        return False

View File

@@ -0,0 +1,66 @@
"""Mixin: KernelJBPatchPostValidationMixin."""
from .kernel_jb_base import ARM64_OP_REG, ARM64_OP_IMM, ARM64_REG_W0, CMP_W0_W0
class KernelJBPatchPostValidationMixin:
    """Mixin: force additional AMFI postValidation result checks to pass."""

    def patch_post_validation_additional(self):
        """Additional postValidation CMP W0,W0 in AMFI code signing path.

        Anchors on the AMFI validation-failure log string, collects the
        BL targets of the function referencing it, and inside each
        AMFI-text callee rewrites every `cmp w0, #imm` that guards a
        `b.ne` and directly follows a BL (i.e. tests a call's return
        value) into `cmp w0, w0`, forcing the comparison to succeed.
        Returns True when at least one site was patched.
        """
        self._log("\n[JB] postValidation additional: cmp w0,w0")
        str_off = self.find_string(b"AMFI: code signature validation failed")
        if str_off < 0:
            self._log(" [-] string not found")
            return False
        # Prefer refs inside the AMFI text range; fall back to any ref.
        refs = self.find_string_refs(str_off, *self.amfi_text)
        if not refs:
            refs = self.find_string_refs(str_off)
        if not refs:
            self._log(" [-] no code refs")
            return False
        caller_start = self.find_function_start(refs[0][0])
        if caller_start < 0:
            return False
        # Collect every BL target inside the referencing function.
        bl_targets = set()
        func_end = self._find_func_end(caller_start, 0x2000)
        for scan in range(caller_start, func_end, 4):
            target = self._is_bl(scan)
            if target >= 0:
                bl_targets.add(target)
        patched = 0
        for target in sorted(bl_targets):
            if not (self.amfi_text[0] <= target < self.amfi_text[1]):
                continue
            callee_end = self._find_func_end(target, 0x200)
            for off in range(target, callee_end, 4):
                d = self._disas_at(off, 2)
                if len(d) < 2:
                    continue
                i0, i1 = d[0], d[1]
                if i0.mnemonic != "cmp" or i1.mnemonic != "b.ne":
                    continue
                ops = i0.operands
                if len(ops) < 2:
                    continue
                if ops[0].type != ARM64_OP_REG or ops[0].reg != ARM64_REG_W0:
                    continue
                if ops[1].type != ARM64_OP_IMM:
                    continue
                # Only patch compares within 2 instructions of a BL,
                # i.e. ones that test a call's return value.
                has_bl = False
                for back in range(off - 4, max(off - 12, target), -4):
                    bt = self._is_bl(back)
                    if bt >= 0:
                        has_bl = True
                        break
                if has_bl:
                    self.emit(off, CMP_W0_W0, "cmp w0,w0 [postValidation additional]")
                    patched += 1
        if patched == 0:
            self._log(" [-] no additional postValidation CMP sites found")
            return False
        return True

View File

@@ -0,0 +1,79 @@
"""Mixin: KernelJBPatchProcPidinfoMixin."""
from .kernel_jb_base import ARM64_OP_IMM, NOP
class KernelJBPatchProcPidinfoMixin:
    def patch_proc_pidinfo(self):
        """Bypass pid-0 checks in _proc_info: NOP first 2 CBZ/CBNZ on w-regs.
        Anchor: find _proc_info via its switch-table pattern, then NOP the
        first two CBZ/CBNZ instructions that guard against pid 0.

        Returns True once both guard instructions were NOPed, False if
        neither the symbol path nor the pattern search yields two sites.
        """
        self._log("\n[JB] _proc_pidinfo: NOP pid-0 guard (2 sites)")
        # Try symbol first
        foff = self._resolve_symbol("_proc_pidinfo")
        if foff >= 0:
            # Only the first 0x80 bytes are scanned — the pid guards sit
            # near the top of the function (prologue region).
            func_end = min(foff + 0x80, self.size)
            hits = []
            for off in range(foff, func_end, 4):
                d = self._disas_at(off)
                if (
                    d
                    and d[0].mnemonic in ("cbz", "cbnz")
                    and d[0].op_str.startswith("w")
                ):
                    hits.append(off)
            if len(hits) >= 2:
                self.emit(hits[0], NOP, "NOP [_proc_pidinfo pid-0 guard A]")
                self.emit(hits[1], NOP, "NOP [_proc_pidinfo pid-0 guard B]")
                return True
            # Fewer than 2 guards via the symbol: deliberately fall
            # through to the pattern-based search below.
        # Find _proc_info by switch table pattern (same as proc_security_policy)
        proc_info_func = -1
        ks, ke = self.kern_text
        # Signature: sub wN, wM, #1 ; cmp wN, #0x21 — the _proc_info
        # callnum dispatch (0x21 == 33, the max proc_info call number).
        for off in range(ks, ke - 8, 4):
            d = self._disas_at(off, 2)
            if len(d) < 2:
                continue
            i0, i1 = d[0], d[1]
            if i0.mnemonic != "sub" or i1.mnemonic != "cmp":
                continue
            if len(i0.operands) < 3:
                continue
            if i0.operands[2].type != ARM64_OP_IMM or i0.operands[2].imm != 1:
                continue
            if len(i1.operands) < 2:
                continue
            if i1.operands[1].type != ARM64_OP_IMM or i1.operands[1].imm != 0x21:
                continue
            # Both instructions must operate on the same register.
            if i0.operands[0].reg != i1.operands[0].reg:
                continue
            proc_info_func = self.find_function_start(off)
            break
        if proc_info_func < 0:
            self._log("    [-] _proc_info function not found")
            return False
        # Find first CBZ x0 (null proc check) and the CBZ/CBNZ wN after
        # the first BL in the prologue region
        hits = []
        prologue_end = min(proc_info_func + 0x80, self.size)
        for off in range(proc_info_func, prologue_end, 4):
            d = self._disas_at(off)
            if not d:
                continue
            i = d[0]
            if i.mnemonic in ("cbz", "cbnz"):
                # CBZ x0 (null check) or CBZ wN (pid-0 check)
                # NOTE(review): unlike the symbol path above, x-register
                # CBZs are also collected here — presumably intentional to
                # catch the null-proc check; confirm on a new kernel.
                hits.append(off)
        if len(hits) < 2:
            self._log(f"    [-] expected 2+ early CBZ/CBNZ, found {len(hits)}")
            return False
        self.emit(hits[0], NOP, "NOP [_proc_pidinfo pid-0 guard A]")
        self.emit(hits[1], NOP, "NOP [_proc_pidinfo pid-0 guard B]")
        return True

View File

@@ -0,0 +1,84 @@
"""Mixin: KernelJBPatchProcSecurityMixin."""
from .kernel_jb_base import ARM64_OP_IMM, MOV_X0_0, RET, Counter
class KernelJBPatchProcSecurityMixin:
    def patch_proc_security_policy(self):
        """Stub _proc_security_policy with mov x0,#0; ret.

        Resolution order: symbol table, then heuristic — locate _proc_info
        through its switch-table dispatch signature and take the BL target
        called most often inside it (once per switch case), which is the
        security-policy check.
        """
        self._log("\n[JB] _proc_security_policy: mov x0,#0; ret")
        # Fast path: a symbol entry makes this trivial.
        sym_off = self._resolve_symbol("_proc_security_policy")
        if sym_off >= 0:
            self.emit(sym_off, MOV_X0_0, "mov x0,#0 [_proc_security_policy]")
            self.emit(sym_off + 4, RET, "ret [_proc_security_policy]")
            return True
        ks, ke = self.kern_text
        proc_info_func = self._locate_proc_info_by_switch(ks, ke)
        if proc_info_func < 0:
            self._log("    [-] _proc_info function not found")
            return False
        proc_info_end = self._find_func_end(proc_info_func, 0x4000)
        self._log(
            f"    [+] _proc_info at 0x{proc_info_func:X} (size 0x{proc_info_end - proc_info_func:X})"
        )
        # Tally BL destinations inside _proc_info; the security-policy
        # check is invoked once per switch case, so it dominates.
        call_counts = Counter()
        for scan in range(proc_info_func, proc_info_end, 4):
            dest = self._is_bl(scan)
            if dest >= 0 and ks <= dest < ke:
                call_counts[dest] += 1
        if not call_counts:
            self._log("    [-] no BL targets found in _proc_info")
            return False
        target_off, n_calls = call_counts.most_common(1)[0]
        self._log(f"    [+] most-called BL target: 0x{target_off:X} ({n_calls} calls)")
        # A genuine per-case helper should be called at least 3 times.
        if n_calls < 3:
            self._log("    [-] most-called target has too few calls")
            return False
        self.emit(target_off, MOV_X0_0, "mov x0,#0 [_proc_security_policy]")
        self.emit(target_off + 4, RET, "ret [_proc_security_policy]")
        return True

    def _locate_proc_info_by_switch(self, ks, ke):
        """Return the start offset of _proc_info found via its dispatch
        signature (sub wN,wM,#1; cmp wN,#0x21 on the same register), or -1.
        """
        for off in range(ks, ke - 8, 4):
            pair = self._disas_at(off, 2)
            if len(pair) < 2:
                continue
            sub_i, cmp_i = pair[0], pair[1]
            if sub_i.mnemonic != "sub" or cmp_i.mnemonic != "cmp":
                continue
            if len(sub_i.operands) < 3 or len(cmp_i.operands) < 2:
                continue
            # sub wN, wM, #1
            if sub_i.operands[2].type != ARM64_OP_IMM or sub_i.operands[2].imm != 1:
                continue
            # cmp wN, #0x21 (33 = max proc_info callnum)
            if cmp_i.operands[1].type != ARM64_OP_IMM or cmp_i.operands[1].imm != 0x21:
                continue
            # Both must act on the same register.
            if sub_i.operands[0].reg != cmp_i.operands[0].reg:
                continue
            return self.find_function_start(off)
        return -1

View File

@@ -0,0 +1,69 @@
"""Mixin: KernelJBPatchSandboxExtendedMixin."""
from .kernel_jb_base import MOV_X0_0, RET
class KernelJBPatchSandboxExtendedMixin:
    def patch_sandbox_hooks_extended(self):
        """Stub remaining sandbox MACF hooks (JB extension beyond base 5 hooks)."""
        self._log("\n[JB] Sandbox extended hooks: mov x0,#0; ret")
        ops_table = self._find_sandbox_ops_table_via_conf()
        if ops_table is None:
            return False
        # mac_policy_ops slot index for each additional hook to disable.
        HOOK_INDICES_EXT = {
            "vnode_check_getattr": 245,
            "proc_check_get_cs_info": 249,
            "proc_check_set_cs_info": 250,
            "proc_check_set_cs_info2": 252,
            "vnode_check_chroot": 254,
            "vnode_check_create": 255,
            "vnode_check_deleteextattr": 256,
            "vnode_check_exchangedata": 257,
            "vnode_check_exec": 258,
            "vnode_check_getattrlist": 259,
            "vnode_check_getextattr": 260,
            "vnode_check_ioctl": 261,
            "vnode_check_link": 264,
            "vnode_check_listextattr": 265,
            "vnode_check_open": 267,
            "vnode_check_readlink": 270,
            "vnode_check_setattrlist": 275,
            "vnode_check_setextattr": 276,
            "vnode_check_setflags": 277,
            "vnode_check_setmode": 278,
            "vnode_check_setowner": 279,
            "vnode_check_setutimes": 280,
            "vnode_check_stat": 281,
            "vnode_check_truncate": 282,
            "vnode_check_unlink": 283,
            "vnode_check_fsgetpath": 316,
        }
        sb_start, sb_end = self.sandbox_text
        stubbed = set()
        for hook_name, idx in HOOK_INDICES_EXT.items():
            func_off = self._read_ops_entry(ops_table, idx)
            # Skip empty slots and pointers outside the Sandbox kext text.
            if func_off is None or func_off <= 0:
                continue
            if not (sb_start <= func_off < sb_end):
                continue
            # Several slots may share one handler — stub each only once.
            if func_off in stubbed:
                continue
            stubbed.add(func_off)
            self.emit(func_off, MOV_X0_0, f"mov x0,#0 [_hook_{hook_name}]")
            self.emit(func_off + 4, RET, f"ret [_hook_{hook_name}]")
        if not stubbed:
            self._log("    [-] no extended sandbox hooks patched")
            return False
        return True
# ══════════════════════════════════════════════════════════════
# Group B: Simple patches
# ══════════════════════════════════════════════════════════════

View File

@@ -0,0 +1,43 @@
"""Mixin: KernelJBPatchSecureRootMixin."""
from .kernel_jb_base import ARM64_OP_IMM
class KernelJBPatchSecureRootMixin:
    def patch_io_secure_bsd_root(self):
        """Skip the security check in _IOSecureBSDRoot.

        Anchor: symbol table, else the 'SecureRootName' string. The first
        forward conditional branch (CBZ/CBNZ/TBZ/TBNZ) in the function is
        made unconditional so the check path is always skipped.
        """
        self._log("\n[JB] _IOSecureBSDRoot: skip check")
        func_off = self._resolve_symbol("_IOSecureBSDRoot")
        if func_off < 0:
            func_off = self._find_func_by_string(b"SecureRootName")
        if func_off < 0:
            self._log("    [-] function not found")
            return False
        func_end = self._find_func_end(func_off, 0x400)
        for off in range(func_off, func_end - 4, 4):
            decoded = self._disas_at(off)
            if not decoded:
                continue
            insn = decoded[0]
            if insn.mnemonic not in ("cbnz", "cbz", "tbnz", "tbz"):
                continue
            # The branch destination is the last immediate operand.
            dest = next(
                (op.imm for op in reversed(insn.operands) if op.type == ARM64_OP_IMM),
                None,
            )
            if dest and dest > off:
                encoded = self._encode_b(off, dest)
                if encoded:
                    self.emit(off, encoded, f"b #0x{dest - off:X} [_IOSecureBSDRoot]")
                    return True
        self._log("    [-] conditional branch not found")
        return False

View File

@@ -0,0 +1,44 @@
"""Mixin: KernelJBPatchSharedRegionMixin."""
from .kernel_jb_base import ARM64_OP_REG, CMP_X0_X0
class KernelJBPatchSharedRegionMixin:
    def patch_shared_region_map(self):
        """Force the shared-region check to pass: cmp x0,x0.

        The first CMP reg,reg followed by B.NE inside the function is
        replaced with CMP X0,X0 so the B.NE never branches.
        """
        self._log("\n[JB] _shared_region_map_and_slide_setup: cmp x0,x0")
        func_off = self._locate_shared_region_func()
        if func_off < 0:
            self._log("    [-] function not found")
            return False
        func_end = self._find_func_end(func_off, 0x2000)
        for off in range(func_off, func_end - 4, 4):
            pair = self._disas_at(off, 2)
            if len(pair) < 2:
                continue
            cmp_i, bne_i = pair[0], pair[1]
            if cmp_i.mnemonic != "cmp" or bne_i.mnemonic != "b.ne":
                continue
            ops = cmp_i.operands
            if (
                len(ops) >= 2
                and ops[0].type == ARM64_OP_REG
                and ops[1].type == ARM64_OP_REG
            ):
                self.emit(
                    off, CMP_X0_X0, "cmp x0,x0 [_shared_region_map_and_slide_setup]"
                )
                return True
        self._log("    [-] CMP+B.NE pattern not found")
        return False

    def _locate_shared_region_func(self):
        """Resolve _shared_region_map_and_slide_setup: symbol first, then
        the '/private/preboot/Cryptexes' string (kernel text, then anywhere).
        Returns the function offset or -1."""
        off = self._resolve_symbol("_shared_region_map_and_slide_setup")
        if off < 0:
            off = self._find_func_by_string(
                b"/private/preboot/Cryptexes", self.kern_text
            )
        if off < 0:
            off = self._find_func_by_string(b"/private/preboot/Cryptexes")
        return off

View File

@@ -0,0 +1,66 @@
"""Mixin: KernelJBPatchSpawnPersonaMixin."""
from .kernel_jb_base import NOP
class KernelJBPatchSpawnPersonaMixin:
    def patch_spawn_validate_persona(self):
        """NOP persona validation: LDR + TBNZ sites.

        Pattern: ldr wN, [xN, #0x600] (unique struct offset) followed
        within 0x30 bytes by tbnz wN, #1 — NOP both the LDR and the TBNZ.
        Tries the symbol table first; falls back to scanning all of kernel
        __TEXT (the pattern is unique to this function, so a global scan
        is safe). Returns True when both sites were patched.

        Fix: the fallback previously re-implemented the search inline,
        duplicating _find_persona_pattern — both paths now share the helper.
        """
        self._log("\n[JB] _spawn_validate_persona: NOP (2 sites)")
        result = None
        # Try symbol first
        foff = self._resolve_symbol("_spawn_validate_persona")
        if foff >= 0:
            result = self._find_persona_pattern(foff, self._find_func_end(foff, 0x800))
        if result is None:
            # Fallback (also taken when the symbol path found nothing):
            # whole-__TEXT pattern search.
            ks, ke = self.kern_text
            result = self._find_persona_pattern(ks, ke)
        if result:
            ldr_off, tbnz_off = result
            self._log(f"    [+] LDR at 0x{ldr_off:X}, TBNZ at 0x{tbnz_off:X}")
            self.emit(ldr_off, NOP, "NOP [_spawn_validate_persona LDR]")
            self.emit(tbnz_off, NOP, "NOP [_spawn_validate_persona TBNZ]")
            return True
        self._log("    [-] pattern not found")
        return False

    def _find_persona_pattern(self, start, end):
        """Find ldr wN,[xN,#0x600] + tbnz wN,#1 pattern in [start, end).

        Returns (ldr_off, tbnz_off) for the first match, or None. The TBNZ
        must occur within 0x30 bytes of the LDR and test a w-register.
        """
        for off in range(start, end - 0x30, 4):
            d = self._disas_at(off)
            if not d or d[0].mnemonic != "ldr":
                continue
            # ldr wN, [xN, #0x600] — the unique struct offset.
            if "#0x600" not in d[0].op_str or not d[0].op_str.startswith("w"):
                continue
            for delta in range(4, 0x30, 4):
                d2 = self._disas_at(off + delta)
                if d2 and d2[0].mnemonic == "tbnz" and "#1" in d2[0].op_str:
                    # Verify it tests a w-register.
                    if d2[0].op_str.startswith("w"):
                        return (off, off + delta)
        return None

View File

@@ -0,0 +1,175 @@
"""Mixin: KernelJBPatchSyscallmaskMixin."""
from .kernel_jb_base import asm, _rd32, _rd64, RET, struct
class KernelJBPatchSyscallmaskMixin:
    def patch_syscallmask_apply_to_proc(self):
        """Redirect _syscallmask_apply_to_proc to custom filter shellcode.
        Anchor: 'syscallmask.c' string → find function → redirect to cave.

        Steps:
          1. Resolve _syscallmask_apply_to_proc, _zalloc_ro_mut and
             _proc_set_syscall_filter_mask (symbols first, heuristics after).
          2. Assemble a 38-word shellcode block in a code cave: a 10-word
             scratch buffer, then a trampoline that saves the arguments,
             calls _zalloc_ro_mut, restores the arguments, and tail-calls
             _proc_set_syscall_filter_mask.
          3. Redirect the original function into the cave just before its
             first BL, passing x0 through x17.
        Returns True only if every step succeeded.
        """
        self._log("\n[JB] _syscallmask_apply_to_proc: shellcode (filter mask)")
        # Resolve required functions
        func_off = self._resolve_symbol("_syscallmask_apply_to_proc")
        zalloc_off = self._resolve_symbol("_zalloc_ro_mut")
        filter_off = self._resolve_symbol("_proc_set_syscall_filter_mask")
        if func_off < 0:
            # String anchor: "syscallmask.c"
            str_off = self.find_string(b"syscallmask.c")
            if str_off >= 0:
                refs = self.find_string_refs(str_off, *self.kern_text)
                if not refs:
                    refs = self.find_string_refs(str_off)
                if refs:
                    # The function containing this string ref is in the
                    # syscallmask module. Find _syscallmask_apply_to_proc
                    # by looking for a function nearby that takes 4 args.
                    base_func = self.find_function_start(refs[0][0])
                    if base_func >= 0:
                        # Search nearby functions for the one that has a
                        # BL to _proc_set_syscall_filter_mask-like function.
                        # Actually, the function with "syscallmask.c" IS likely
                        # _syscallmask_apply_to_proc or very close to it.
                        # NOTE(review): heuristic — the string ref owner is
                        # assumed to BE the target function; confirm per build.
                        func_off = base_func
        if func_off < 0:
            self._log("    [-] _syscallmask_apply_to_proc not found")
            return False
        # Find _zalloc_ro_mut: search for the BL target from within the function
        # that's called with specific arguments. Use BL callers analysis.
        if zalloc_off < 0:
            func_end = self._find_func_end(func_off, 0x200)
            for off in range(func_off, func_end, 4):
                target = self._is_bl(off)
                if target >= 0:
                    # _zalloc_ro_mut is typically one of the BL targets
                    # It's the one with many callers (>50)
                    # bl_callers is keyed by file offset (same as _is_bl returns)
                    n = len(self.bl_callers.get(target, []))
                    if n > 50:
                        zalloc_off = target
                        break
        # Find _proc_set_syscall_filter_mask: search for a BL or B target
        if filter_off < 0:
            func_end = self._find_func_end(func_off, 0x200)
            # It's typically the last BL/B target in the function (tail call)
            # Scan backwards so the last call site is found first.
            for off in range(func_end - 4, func_off, -4):
                target = self._is_bl(off)
                if target >= 0:
                    filter_off = target
                    break
                # Also check for unconditional B
                val = _rd32(self.raw, off)
                if (val & 0xFC000000) == 0x14000000:
                    # Sign-extend the 26-bit branch immediate.
                    imm26 = val & 0x3FFFFFF
                    if imm26 & (1 << 25):
                        imm26 -= 1 << 26
                    target = off + imm26 * 4
                    if self.kern_text[0] <= target < self.kern_text[1]:
                        filter_off = target
                        break
        if zalloc_off < 0 or filter_off < 0:
            self._log(
                f"    [-] required functions not found "
                f"(zalloc={'found' if zalloc_off >= 0 else 'missing'}, "
                f"filter={'found' if filter_off >= 0 else 'missing'})"
            )
            return False
        # Find code cave (need ~160 bytes)
        cave = self._find_code_cave(160)
        if cave < 0:
            self._log("    [-] no code cave found")
            return False
        cave_base = cave
        # Encode BL to _zalloc_ro_mut (at cave + 28*4)
        zalloc_bl_off = cave_base + 28 * 4
        zalloc_bl = self._encode_bl(zalloc_bl_off, zalloc_off)
        if not zalloc_bl:
            self._log("    [-] BL to _zalloc_ro_mut out of range")
            return False
        # Encode B to _proc_set_syscall_filter_mask (at end of shellcode)
        filter_b_off = cave_base + 37 * 4
        filter_b = self._encode_b(filter_b_off, filter_off)
        if not filter_b:
            self._log("    [-] B to _proc_set_syscall_filter_mask out of range")
            return False
        # Build shellcode
        # Word indices below are load-bearing: the entry point is idx 10,
        # the BL sits at idx 28 and the tail B at idx 37 — these must match
        # zalloc_bl_off / filter_b_off computed above.
        shellcode_parts = []
        # Words 0-9: scratch buffer (0xFFFFFFFF filler), addressed by the
        # ADR at idx 23 below.
        for _ in range(10):
            shellcode_parts.append(b"\xff\xff\xff\xff")
        shellcode_parts.append(asm("cbz x2, #0x6c"))  # idx 10
        shellcode_parts.append(asm("sub sp, sp, #0x40"))  # idx 11
        shellcode_parts.append(asm("stp x19, x20, [sp, #0x10]"))  # idx 12
        shellcode_parts.append(asm("stp x21, x22, [sp, #0x20]"))  # idx 13
        shellcode_parts.append(asm("stp x29, x30, [sp, #0x30]"))  # idx 14
        shellcode_parts.append(asm("mov x19, x0"))  # idx 15
        shellcode_parts.append(asm("mov x20, x1"))  # idx 16
        shellcode_parts.append(asm("mov x21, x2"))  # idx 17
        shellcode_parts.append(asm("mov x22, x3"))  # idx 18
        shellcode_parts.append(asm("mov x8, #8"))  # idx 19
        shellcode_parts.append(asm("mov x0, x17"))  # idx 20
        shellcode_parts.append(asm("mov x1, x21"))  # idx 21
        shellcode_parts.append(asm("mov x2, #0"))  # idx 22
        # adr x3, #-0x5C — encode manually
        # (-0x5C from idx 23 points x3 at idx 0, the scratch buffer)
        adr_delta = -(23 * 4)
        immhi = (adr_delta >> 2) & 0x7FFFF
        immlo = adr_delta & 0x3
        adr_insn = 0x10000003 | (immlo << 29) | (immhi << 5)
        shellcode_parts.append(struct.pack("<I", adr_insn))  # idx 23
        # x4 = ceil(x22 / 8): udiv + msub computes the remainder, and a
        # non-zero remainder bumps the quotient by one.
        shellcode_parts.append(asm("udiv x4, x22, x8"))  # idx 24
        shellcode_parts.append(asm("msub x10, x4, x8, x22"))  # idx 25
        shellcode_parts.append(asm("cbz x10, #8"))  # idx 26
        shellcode_parts.append(asm("add x4, x4, #1"))  # idx 27
        shellcode_parts.append(zalloc_bl)  # idx 28
        # Restore the original four arguments and tail-call the filter.
        shellcode_parts.append(asm("mov x0, x19"))  # idx 29
        shellcode_parts.append(asm("mov x1, x20"))  # idx 30
        shellcode_parts.append(asm("mov x2, x21"))  # idx 31
        shellcode_parts.append(asm("mov x3, x22"))  # idx 32
        shellcode_parts.append(asm("ldp x19, x20, [sp, #0x10]"))  # idx 33
        shellcode_parts.append(asm("ldp x21, x22, [sp, #0x20]"))  # idx 34
        shellcode_parts.append(asm("ldp x29, x30, [sp, #0x30]"))  # idx 35
        shellcode_parts.append(asm("add sp, sp, #0x40"))  # idx 36
        shellcode_parts.append(filter_b)  # idx 37
        # Write shellcode
        for i, part in enumerate(shellcode_parts):
            self.emit(
                cave_base + i * 4,
                part,
                f"shellcode+{i * 4} [_syscallmask_apply_to_proc]",
            )
        # Redirect original function
        # The instruction before the first BL is overwritten with
        # mov x17,x0 and the BL itself becomes a branch into the cave.
        # NOTE(review): assumes the clobbered pre-BL instruction is
        # argument setup made redundant by the trampoline — verify.
        func_end = self._find_func_end(func_off, 0x200)
        for off in range(func_off, min(func_off + 0x100, func_end), 4):
            d = self._disas_at(off)
            if not d:
                continue
            if d[0].mnemonic == "bl":
                self.emit(
                    off - 4,
                    asm("mov x17, x0"),
                    "mov x17,x0 [_syscallmask_apply_to_proc inject]",
                )
                # Branch target is idx 10 (cave_base + 40), the entry point.
                b_to_cave = self._encode_b(off, cave_base + 10 * 4)
                if b_to_cave:
                    self.emit(
                        off,
                        b_to_cave,
                        f"b cave [_syscallmask_apply_to_proc -> 0x{cave_base + 40:X}]",
                    )
                return True
        self._log("    [-] injection point not found")
        return False

View File

@@ -0,0 +1,69 @@
"""Mixin: KernelJBPatchTaskConversionMixin."""
from .kernel_jb_base import ARM64_OP_REG, ARM64_OP_MEM, ARM64_REG_X0, ARM64_REG_X1, CMP_XZR_XZR
class KernelJBPatchTaskConversionMixin:
    def patch_task_conversion_eval_internal(self):
        """Allow task conversion: cmp Xn,x0 -> cmp xzr,xzr at unique guard site.

        The guard site is identified by a 5-instruction signature:
            ldr  Xn, [Xn, ...]   ; reload through the same register
            cmp  Xn, x0          ; compare against first argument
            b.eq ...
            cmp  Xn, x1          ; compare against second argument
            b.eq ...
        Exactly one match in kernel __TEXT is required; any other count
        aborts rather than risking a wrong patch.
        """
        self._log("\n[JB] task_conversion_eval_internal: cmp xzr,xzr")
        candidates = []
        ks, ke = self.kern_text
        for off in range(ks + 4, ke - 12, 4):
            d0 = self._disas_at(off)
            if not d0:
                continue
            i0 = d0[0]
            # Candidate anchor: cmp <reg>, x0 (both register operands).
            if i0.mnemonic != "cmp" or len(i0.operands) < 2:
                continue
            a0, a1 = i0.operands[0], i0.operands[1]
            if not (a0.type == ARM64_OP_REG and a1.type == ARM64_OP_REG):
                continue
            if a1.reg != ARM64_REG_X0:
                continue
            cmp_reg = a0.reg
            # Decode the neighborhood: one insn before, three after.
            dp = self._disas_at(off - 4)
            d1 = self._disas_at(off + 4)
            d2 = self._disas_at(off + 8)
            d3 = self._disas_at(off + 12)
            if not dp or not d1 or not d2 or not d3:
                continue
            p = dp[0]
            i1, i2, i3 = d1[0], d2[0], d3[0]
            # Preceding insn must be ldr cmp_reg, [cmp_reg, ...].
            if p.mnemonic != "ldr" or len(p.operands) < 2:
                continue
            p0, p1 = p.operands[0], p.operands[1]
            if p0.type != ARM64_OP_REG or p0.reg != cmp_reg:
                continue
            if p1.type != ARM64_OP_MEM:
                continue
            if p1.mem.base != cmp_reg:
                continue
            if i1.mnemonic != "b.eq":
                continue
            # Second compare: cmp cmp_reg, x1 followed by b.eq.
            if i2.mnemonic != "cmp" or len(i2.operands) < 2:
                continue
            j0, j1 = i2.operands[0], i2.operands[1]
            if not (j0.type == ARM64_OP_REG and j1.type == ARM64_OP_REG):
                continue
            if not (j0.reg == cmp_reg and j1.reg == ARM64_REG_X1):
                continue
            if i3.mnemonic != "b.eq":
                continue
            candidates.append(off)
        # Require uniqueness — patching an ambiguous site is worse than
        # failing loudly.
        if len(candidates) != 1:
            self._log(
                f"    [-] expected 1 task-conversion guard site, found {len(candidates)}"
            )
            return False
        self.emit(
            candidates[0], CMP_XZR_XZR, "cmp xzr,xzr [_task_conversion_eval_internal]"
        )
        return True

View File

@@ -0,0 +1,117 @@
"""Mixin: KernelJBPatchTaskForPidMixin."""
from .kernel_jb_base import NOP
class KernelJBPatchTaskForPidMixin:
    def patch_task_for_pid(self):
        """NOP proc_ro security policy copy in _task_for_pid.
        Pattern: _task_for_pid is a Mach trap handler (0 BL callers) with:
          - 2x ldadda (proc reference counting)
          - 2x ldr wN,[xN,#0x490]; str wN,[xN,#0xc] (proc_ro security copy)
          - movk xN, #0xc8a2, lsl #48 (PAC discriminator)
          - BL to a non-panic function with >500 callers (proc_find etc.)
        NOP the second ldr wN,[xN,#0x490] (the target process security copy).

        Returns True once the LDR is NOPed, False if no function matching
        the profile exists.
        """
        self._log("\n[JB] _task_for_pid: NOP")
        # Try symbol first
        foff = self._resolve_symbol("_task_for_pid")
        if foff >= 0:
            func_end = self._find_func_end(foff, 0x800)
            patch_off = self._find_second_ldr490(foff, func_end)
            if patch_off:
                self.emit(patch_off, NOP, "NOP [_task_for_pid proc_ro copy]")
                return True
            # Symbol path failed — fall through to the profile scan.
        # Pattern search: scan kern_text for functions matching the profile
        ks, ke = self.kern_text
        off = ks
        while off < ke - 4:
            d = self._disas_at(off)
            # Functions are delimited by their PACIBSP prologue.
            if not d or d[0].mnemonic != "pacibsp":
                off += 4
                continue
            func_start = off
            func_end = self._find_func_end(func_start, 0x1000)
            # Quick filter: skip functions with BL callers (Mach trap = indirect)
            if self.bl_callers.get(func_start, []):
                off = func_end
                continue
            # Profile counters for this candidate function.
            ldadda_count = 0
            ldr490_count = 0
            ldr490_offs = []
            has_movk_c8a2 = False
            has_high_caller_bl = False
            for o in range(func_start, func_end, 4):
                d = self._disas_at(o)
                if not d:
                    continue
                i = d[0]
                if i.mnemonic == "ldadda":
                    ldadda_count += 1
                elif (
                    i.mnemonic == "ldr"
                    and "#0x490" in i.op_str
                    and i.op_str.startswith("w")
                ):
                    # Only count the LDR when it is immediately followed by
                    # the paired STR into offset 0xc (the security copy).
                    d2 = self._disas_at(o + 4)
                    if (
                        d2
                        and d2[0].mnemonic == "str"
                        and "#0xc" in d2[0].op_str
                        and d2[0].op_str.startswith("w")
                    ):
                        ldr490_count += 1
                        ldr490_offs.append(o)
                elif i.mnemonic == "movk" and "#0xc8a2" in i.op_str:
                    has_movk_c8a2 = True
                elif i.mnemonic == "bl":
                    target = i.operands[0].imm
                    n_callers = len(self.bl_callers.get(target, []))
                    # >500 but <8000 excludes _panic (typically 8000+)
                    if 500 < n_callers < 8000:
                        has_high_caller_bl = True
            # All four profile criteria must hold simultaneously.
            if (
                ldadda_count >= 2
                and ldr490_count >= 2
                and has_movk_c8a2
                and has_high_caller_bl
            ):
                patch_off = ldr490_offs[1]  # NOP the second occurrence
                self._log(
                    f"    [+] _task_for_pid at 0x{func_start:X}, patch at 0x{patch_off:X}"
                )
                self.emit(patch_off, NOP, "NOP [_task_for_pid proc_ro copy]")
                return True
            off = func_end
        self._log("    [-] function not found")
        return False

    def _find_second_ldr490(self, start, end):
        """Find the second ldr wN,[xN,#0x490]+str wN,[xN,#0xc] in range."""
        count = 0
        for off in range(start, end - 4, 4):
            d = self._disas_at(off)
            if not d or d[0].mnemonic != "ldr":
                continue
            if "#0x490" not in d[0].op_str or not d[0].op_str.startswith("w"):
                continue
            # Must be paired with a STR into offset 0xc.
            d2 = self._disas_at(off + 4)
            if (
                d2
                and d2[0].mnemonic == "str"
                and "#0xc" in d2[0].op_str
                and d2[0].op_str.startswith("w")
            ):
                count += 1
                if count == 2:
                    return off
        return None

View File

@@ -0,0 +1,103 @@
"""Mixin: KernelJBPatchThidCrashMixin."""
from .kernel_jb_base import _rd32, _rd64
class KernelJBPatchThidCrashMixin:
    def patch_thid_should_crash(self):
        """Zero out _thid_should_crash global variable.
        Anchor: 'thid_should_crash' string in __DATA → nearby sysctl_oid struct
        contains a raw pointer (low32 = file offset) to the variable.

        Returns True once a 4-byte zero was written over the variable.
        """
        self._log("\n[JB] _thid_should_crash: zero out")
        # Try symbol first
        foff = self._resolve_symbol("_thid_should_crash")
        if foff >= 0:
            self.emit(foff, b"\x00\x00\x00\x00", "zero [_thid_should_crash]")
            return True
        # Find the string in __DATA (sysctl name string)
        str_off = self.find_string(b"thid_should_crash")
        if str_off < 0:
            self._log("    [-] string not found")
            return False
        self._log(f"    [*] string at foff 0x{str_off:X}")
        # The sysctl_oid struct is near the string in __DATA.
        # It contains 8-byte entries, one of which has its low32 bits
        # equal to the file offset of the variable (chained fixup encoding).
        # The variable is a 4-byte int (typically value 1) in __DATA_CONST.
        #
        # Search forward from the string for 8-byte values whose low32
        # points to a valid location holding a small non-zero value.
        # NOTE(review): the "low32 == file offset" property relies on the
        # kernel's chained-fixup pointer format — confirm on new builds.
        data_const_ranges = [
            (fo, fo + fs)
            for name, _, fo, fs, _ in self.all_segments
            if name in ("__DATA_CONST",) and fs > 0
        ]
        # Probe up to 128 bytes past the string, 8-byte aligned entries.
        for delta in range(0, 128, 8):
            check = str_off + delta
            if check + 8 > self.size:
                break
            val = _rd64(self.raw, check)
            if val == 0:
                continue
            low32 = val & 0xFFFFFFFF
            # The variable should be in __DATA_CONST or __DATA
            if low32 == 0 or low32 >= self.size:
                continue
            # Check if low32 points to a location holding a small int (1-255)
            target_val = _rd32(self.raw, low32)
            if 1 <= target_val <= 255:
                # Verify it's in a data segment (not code)
                in_data = any(s <= low32 < e for s, e in data_const_ranges)
                if not in_data:
                    # Also accept __DATA segments
                    in_data = any(
                        fo <= low32 < fo + fs
                        for name, _, fo, fs, _ in self.all_segments
                        if "DATA" in name and fs > 0
                    )
                if in_data:
                    self._log(
                        f"    [+] variable at foff 0x{low32:X} "
                        f"(value={target_val}, found via sysctl_oid "
                        f"at str+0x{delta:X})"
                    )
                    self.emit(low32, b"\x00\x00\x00\x00", "zero [_thid_should_crash]")
                    return True
        # Fallback: if string has code refs, search via ADRP+ADD
        refs = self.find_string_refs(str_off)
        if refs:
            func_start = self.find_function_start(refs[0][0])
            if func_start >= 0:
                func_end = self._find_func_end(func_start, 0x200)
                for off in range(func_start, func_end - 4, 4):
                    d = self._disas_at(off, 2)
                    if len(d) < 2:
                        continue
                    i0, i1 = d[0], d[1]
                    if i0.mnemonic == "adrp" and i1.mnemonic == "add":
                        # Materialize the ADRP page as a file offset, then
                        # apply the ADD immediate.
                        page = (i0.operands[1].imm - self.base_va) & ~0xFFF
                        imm12 = i1.operands[2].imm if len(i1.operands) > 2 else 0
                        target = page + imm12
                        if 0 < target < self.size:
                            tv = _rd32(self.raw, target)
                            # Same plausibility check as above: small
                            # non-zero int means we likely found it.
                            if 1 <= tv <= 255:
                                self.emit(
                                    target,
                                    b"\x00\x00\x00\x00",
                                    "zero [_thid_should_crash]",
                                )
                                return True
        self._log("    [-] variable not found")
        return False
# ══════════════════════════════════════════════════════════════
# Group C: Complex shellcode patches
# ══════════════════════════════════════════════════════════════

View File

@@ -0,0 +1,77 @@
"""Mixin: KernelJBPatchVmFaultMixin."""
from .kernel_jb_base import ARM64_OP_REG, ARM64_REG_W0, NOP
class KernelJBPatchVmFaultMixin:
    def patch_vm_fault_enter_prepare(self):
        """NOP a PMAP check in _vm_fault_enter_prepare.

        The patch site is a BL to a rarely-called function (< 20 callers)
        followed within 4 instructions by TBZ/TBNZ on w0. Resolution order:
        symbol table, then the 'vm_fault_enter_prepare' string anchor, then
        a whole-__TEXT scan restricted to large (> 0x2000 byte) functions.
        Returns True once the BL is NOPed.
        """
        self._log("\n[JB] _vm_fault_enter_prepare: NOP")
        # Try symbol first
        foff = self._resolve_symbol("_vm_fault_enter_prepare")
        if foff >= 0:
            func_end = self._find_func_end(foff, 0x2000)
            # Skip the prologue (+0x100): the check sits deep in the body.
            result = self._find_bl_tbz_pmap(foff + 0x100, func_end)
            if result:
                self.emit(result, NOP, "NOP [_vm_fault_enter_prepare]")
                return True
        # String anchor: all refs to "vm_fault_enter_prepare"
        str_off = self.find_string(b"vm_fault_enter_prepare")
        if str_off >= 0:
            refs = self.find_string_refs(str_off)
            for adrp_off, _, _ in refs:
                func_start = self.find_function_start(adrp_off)
                if func_start < 0:
                    continue
                func_end = self._find_func_end(func_start, 0x4000)
                result = self._find_bl_tbz_pmap(func_start + 0x100, func_end)
                if result:
                    self.emit(result, NOP, "NOP [_vm_fault_enter_prepare]")
                    return True
        # Broader fallback: probe each instruction exactly once (the
        # previous overlapping 16-byte windows re-checked every BL site
        # three times) and require the match to live in a large function.
        ks, ke = self.kern_text
        for off in range(ks, ke - 4, 4):
            # end = off + 8 makes the helper consider only `off` as the BL.
            result = self._find_bl_tbz_pmap(off, off + 8)
            if not result:
                continue
            func_start = self.find_function_start(result)
            if func_start >= 0:
                func_end = self._find_func_end(func_start, 0x4000)
                if func_end - func_start > 0x2000:
                    self.emit(result, NOP, "NOP [_vm_fault_enter_prepare]")
                    return True
        self._log("    [-] patch site not found")
        return False

    def _find_bl_tbz_pmap(self, start, end):
        """Find BL to a rarely-called function followed within 4 insns by TBZ/TBNZ w0.
        Returns the BL offset, or None."""
        for off in range(start, end - 4, 4):
            d0 = self._disas_at(off)
            if not d0 or d0[0].mnemonic != "bl":
                continue
            bl_target = d0[0].operands[0].imm
            # Rarely-called (< 20 callers) filters out common helpers.
            n_callers = len(self.bl_callers.get(bl_target, []))
            if n_callers >= 20:
                continue
            # Check next 4 instructions for TBZ/TBNZ on w0
            for delta in range(1, 5):
                d1 = self._disas_at(off + delta * 4)
                if not d1:
                    break
                i1 = d1[0]
                if i1.mnemonic in ("tbnz", "tbz") and len(i1.operands) >= 2:
                    if (
                        i1.operands[0].type == ARM64_OP_REG
                        and i1.operands[0].reg == ARM64_REG_W0
                    ):
                        return off
        return None

View File

@@ -0,0 +1,48 @@
"""Mixin: KernelJBPatchVmProtectMixin."""
from .kernel_jb_base import ARM64_OP_IMM
class KernelJBPatchVmProtectMixin:
    def patch_vm_map_protect(self):
        """Skip a check in _vm_map_protect by branching over its guard.

        Anchor: symbol table, else the 'vm_map_protect(' panic string
        (kernel text first, then anywhere). The first forward TBNZ testing
        a high bit (>= 24) is replaced with an unconditional branch.
        """
        self._log("\n[JB] _vm_map_protect: skip check")
        func_off = self._resolve_symbol("_vm_map_protect")
        if func_off < 0:
            func_off = self._find_func_by_string(b"vm_map_protect(", self.kern_text)
        if func_off < 0:
            func_off = self._find_func_by_string(b"vm_map_protect(")
        if func_off < 0:
            self._log("    [-] function not found")
            return False
        func_end = self._find_func_end(func_off, 0x2000)
        for off in range(func_off, func_end - 4, 4):
            decoded = self._disas_at(off)
            if not decoded:
                continue
            insn = decoded[0]
            if insn.mnemonic != "tbnz" or len(insn.operands) < 3:
                continue
            bit_op, dest_op = insn.operands[1], insn.operands[2]
            # High tested bit (>= 24) marks the permission-check guard.
            if bit_op.type != ARM64_OP_IMM or bit_op.imm < 24:
                continue
            dest = dest_op.imm if dest_op.type == ARM64_OP_IMM else -1
            if dest > off:
                patch = self._encode_b(off, dest)
                if patch:
                    self.emit(off, patch, f"b #0x{dest - off:X} [_vm_map_protect]")
                    return True
        self._log("    [-] patch site not found")
        return False

View File

@@ -0,0 +1,115 @@
"""Mixin: APFS graft and fsioc helpers."""
from .kernel_asm import MOV_W0_0, _PACIBSP_U32, _rd32
class KernelPatchApfsGraftMixin:
    def _find_validate_root_hash_func(self):
        """Find validate_on_disk_root_hash function via 'authenticate_root_hash' string."""
        anchor = self.find_string(b"authenticate_root_hash")
        if anchor < 0:
            return -1
        code_refs = self.find_string_refs(anchor, *self.apfs_text)
        return self.find_function_start(code_refs[0][0]) if code_refs else -1

    def patch_apfs_graft(self):
        """Patch 12: Replace BL to validate_on_disk_root_hash with mov w0,#0.

        Rather than stubbing _apfs_graft at its entry point, locate the
        single BL that invokes root-hash validation and neutralize only
        that call.
        """
        self._log("\n[12] _apfs_graft: mov w0,#0 (validate_root_hash BL)")
        # Locate _apfs_graft via its NUL-delimited name string.
        exact = self.raw.find(b"\x00apfs_graft\x00")
        if exact < 0:
            self._log("    [-] 'apfs_graft' string not found")
            return False
        refs = self.find_string_refs(exact + 1, *self.apfs_text)
        if not refs:
            self._log("    [-] no code refs")
            return False
        graft_start = self.find_function_start(refs[0][0])
        if graft_start < 0:
            self._log("    [-] _apfs_graft function start not found")
            return False
        vrh_func = self._find_validate_root_hash_func()
        if vrh_func < 0:
            self._log("    [-] validate_on_disk_root_hash not found")
            return False
        # Walk _apfs_graft looking for the BL to the validator. Early
        # returns (ret/retab) do not end the walk — only a PACIBSP word
        # marking the next function does.
        scan_end = min(graft_start + 0x2000, self.size)
        for scan in range(graft_start, scan_end, 4):
            if scan > graft_start + 8 and _rd32(self.raw, scan) == _PACIBSP_U32:
                break
            if self._is_bl(scan) == vrh_func:
                self.emit(scan, MOV_W0_0, "mov w0,#0 [_apfs_graft]")
                return True
        self._log("    [-] BL to validate_on_disk_root_hash not found in _apfs_graft")
        return False

    def _find_validate_payload_manifest_func(self):
        """Find the AppleImage4 validate_payload_and_manifest function."""
        anchor = self.find_string(b"validate_payload_and_manifest")
        if anchor < 0:
            return -1
        code_refs = self.find_string_refs(anchor, *self.apfs_text)
        return self.find_function_start(code_refs[0][0]) if code_refs else -1

    def patch_handle_fsioc_graft(self):
        """Patch 15: Replace BL to validate_payload_and_manifest with mov w0,#0.

        Rather than stubbing _handle_fsioc_graft at its entry point,
        locate the specific BL that calls AppleImage4 validation and
        neutralize only that call.
        """
        self._log("\n[15] _handle_fsioc_graft: mov w0,#0 (validate BL)")
        # Locate _handle_fsioc_graft via its NUL-delimited name string.
        exact = self.raw.find(b"\x00handle_fsioc_graft\x00")
        if exact < 0:
            self._log("    [-] 'handle_fsioc_graft' string not found")
            return False
        refs = self.find_string_refs(exact + 1, *self.apfs_text)
        if not refs:
            self._log("    [-] no code refs")
            return False
        fsioc_start = self.find_function_start(refs[0][0])
        if fsioc_start < 0:
            self._log("    [-] function start not found")
            return False
        val_func = self._find_validate_payload_manifest_func()
        if val_func < 0:
            self._log("    [-] validate_payload_and_manifest not found")
            return False
        # Walk the function for the BL to the validator; a PACIBSP
        # (decoded, unlike the raw-word check above) marks the boundary.
        scan_end = min(fsioc_start + 0x400, self.size)
        for scan in range(fsioc_start, scan_end, 4):
            insns = self._disas_at(scan)
            if not insns:
                continue
            if scan > fsioc_start + 8 and insns[0].mnemonic == "pacibsp":
                break
            if self._is_bl(scan) == val_func:
                self.emit(scan, MOV_W0_0, "mov w0,#0 [_handle_fsioc_graft]")
                return True
        self._log("    [-] BL to validate_payload_and_manifest not found")
        return False
# ── Sandbox MACF hooks ───────────────────────────────────────

View File

@@ -0,0 +1,143 @@
"""Mixin: APFS mount checks patches."""
from capstone.arm64_const import ARM64_OP_REG, ARM64_REG_W0, ARM64_REG_X0
from .kernel_asm import CMP_X0_X0, MOV_W0_0, _PACIBSP_U32, _rd32
class KernelPatchApfsMountMixin:
    # NOTE(review): relies on helpers/attributes supplied by the concrete
    # patcher class this mixin is combined with (find_function_start,
    # bl_callers, _is_bl, _disas_at, emit, _log, apfs_text, raw, size) —
    # confirm against the base class definition.

    def patch_apfs_vfsop_mount_cmp(self):
        """Patch 13: cmp x0,x0 in _apfs_vfsop_mount (current_thread == kernel_task check).
        The target CMP follows the pattern: BL (returns current_thread in x0),
        ADRP + LDR + LDR (load kernel_task global), CMP x0, Xm, B.EQ.
        We require x0 as the first CMP operand to distinguish it from other
        CMP Xn,Xm instructions in the same function.

        Returns True if a CMP was rewritten, False otherwise.
        """
        self._log("\n[13] _apfs_vfsop_mount: cmp x0,x0 (mount rw check)")
        # Anchor: _apfs_mount_upgrade_checks is located by its name string;
        # the CMP we want lives in one of its CALLERS (_apfs_vfsop_mount).
        refs_upgrade = self._find_by_string_in_range(
            b"apfs_mount_upgrade_checks\x00",
            self.apfs_text,
            "apfs_mount_upgrade_checks",
        )
        if not refs_upgrade:
            return False
        func_start = self.find_function_start(refs_upgrade[0][0])
        if func_start < 0:
            return False
        # Find BL callers of _apfs_mount_upgrade_checks
        callers = self.bl_callers.get(func_start, [])
        if not callers:
            # The caller map may key the function 4 bytes in (e.g. past a
            # landing-pad instruction) — retry both candidate entry offsets.
            for off_try in [func_start, func_start + 4]:
                callers = self.bl_callers.get(off_try, [])
                if callers:
                    break
        if not callers:
            self._log(" [-] no BL callers of _apfs_mount_upgrade_checks found")
            # Last resort: brute-force scan APFS __text for a BL landing at
            # (or 4 bytes past) the resolved function start.
            for off in range(self.apfs_text[0], self.apfs_text[1], 4):
                bl_target = self._is_bl(off)
                if bl_target >= 0 and func_start <= bl_target <= func_start + 4:
                    callers.append(off)
        for caller_off in callers:
            # Ignore callers outside APFS __text (bogus map entries).
            if not (self.apfs_text[0] <= caller_off < self.apfs_text[1]):
                continue
            # Scan a wider range — the CMP can be 0x800+ bytes before the BL
            caller_func = self.find_function_start(caller_off)
            scan_start = (
                caller_func
                if caller_func >= 0
                else max(caller_off - 0x800, self.apfs_text[0])
            )
            scan_end = min(caller_off + 0x100, self.apfs_text[1])
            for scan in range(scan_start, scan_end, 4):
                dis = self._disas_at(scan)
                if not dis or dis[0].mnemonic != "cmp":
                    continue
                ops = dis[0].operands
                if len(ops) < 2:
                    continue
                # Require CMP Xn, Xm (both register operands)
                if ops[0].type != ARM64_OP_REG or ops[1].type != ARM64_OP_REG:
                    continue
                # Require x0 as first operand (return value from BL)
                if ops[0].reg != ARM64_REG_X0:
                    continue
                # Skip CMP x0, x0 (already patched or trivial)
                if ops[0].reg == ops[1].reg:
                    continue
                self.emit(
                    scan,
                    CMP_X0_X0,
                    f"cmp x0,x0 (was {dis[0].mnemonic} {dis[0].op_str}) "
                    "[_apfs_vfsop_mount]",
                )
                return True
        self._log(" [-] CMP x0,Xm not found near mount_upgrade_checks caller")
        return False

    def patch_apfs_mount_upgrade_checks(self):
        """Patch 14: Replace TBNZ w0,#0xe with mov w0,#0 in _apfs_mount_upgrade_checks.
        Within the function, a BL calls a small flag-reading leaf function,
        then TBNZ w0,#0xe branches to the error path. Replace the TBNZ
        with mov w0,#0 to force the success path.

        Returns True if the TBNZ was rewritten, False otherwise.
        """
        self._log("\n[14] _apfs_mount_upgrade_checks: mov w0,#0 (tbnz bypass)")
        refs = self._find_by_string_in_range(
            b"apfs_mount_upgrade_checks\x00",
            self.apfs_text,
            "apfs_mount_upgrade_checks",
        )
        if not refs:
            return False
        func_start = self.find_function_start(refs[0][0])
        if func_start < 0:
            self._log(" [-] function start not found")
            return False
        # Scan for BL followed by TBNZ w0
        # Don't stop at ret/retab (early returns) — only stop at PACIBSP (new function)
        for scan in range(func_start, min(func_start + 0x200, self.size), 4):
            if scan > func_start + 8 and _rd32(self.raw, scan) == _PACIBSP_U32:
                break
            bl_target = self._is_bl(scan)
            if bl_target < 0:
                continue
            # Check if BL target is a small leaf function (< 0x20 bytes, ends with ret)
            is_leaf = False
            for k in range(0, 0x20, 4):
                if bl_target + k >= self.size:
                    break
                dis = self._disas_at(bl_target + k)
                if dis and dis[0].mnemonic == "ret":
                    is_leaf = True
                    break
            if not is_leaf:
                continue
            # Check next instruction is TBNZ w0, #0xe
            next_off = scan + 4
            insns = self._disas_at(next_off)
            if not insns:
                continue
            i = insns[0]
            # NOTE(review): only the tested register (w0) is verified here,
            # not the bit number 0xe named in the docstring — any TBNZ w0
            # right after a qualifying leaf-BL matches.
            if i.mnemonic == "tbnz" and len(i.operands) >= 1:
                if (
                    i.operands[0].type == ARM64_OP_REG
                    and i.operands[0].reg == ARM64_REG_W0
                ):
                    self.emit(
                        next_off, MOV_W0_0, "mov w0,#0 [_apfs_mount_upgrade_checks]"
                    )
                    return True
        self._log(" [-] BL + TBNZ w0 pattern not found")
        return False

View File

@@ -0,0 +1,48 @@
"""Mixin: APFS seal broken patch."""
from .kernel_asm import NOP
class KernelPatchApfsSealMixin:
    """Mixin: defeats the APFS 'root volume seal is broken' panic (patch 2).

    Depends on helpers supplied by the concrete patcher class
    (find_string, find_string_refs, _is_bl, _decode_branch_target, emit, _log).
    """

    def patch_apfs_seal_broken(self):
        """Patch 2: NOP the conditional branch leading to 'root volume seal is broken' panic."""
        self._log("\n[2] _authapfs_seal_is_broken: seal broken panic")
        anchor = self.find_string(b"root volume seal is broken")
        if anchor < 0:
            self._log(" [-] string not found")
            return False
        xrefs = self.find_string_refs(anchor, *self.apfs_text)
        if not xrefs:
            self._log(" [-] no code refs")
            return False
        for adrp_off, add_off, _ in xrefs:
            # Locate the BL _panic that consumes this string reference.
            panic_bl = next(
                (
                    off
                    for off in range(add_off, min(add_off + 0x40, self.size), 4)
                    if self._is_bl(off) == self.panic_off
                ),
                -1,
            )
            if panic_bl < 0:
                continue
            # Walk backwards for a conditional branch that jumps INTO the
            # panic path. The error block may set up __FILE__/line args
            # before the string ADRP, so accept targets up to 0x40 before it.
            lo = adrp_off - 0x40
            floor = max(adrp_off - 0x200, 0)
            back = adrp_off - 4
            while back > floor:
                target, kind = self._decode_branch_target(back)
                if target is not None and lo <= target <= panic_bl + 4:
                    self.emit(
                        back,
                        NOP,
                        f"NOP {kind} (seal broken) [_authapfs_seal_is_broken]",
                    )
                    return True
                back -= 4
        self._log(" [-] could not find conditional branch to NOP")
        return False

View File

@@ -0,0 +1,50 @@
"""Mixin: APFS root snapshot patch."""
from capstone.arm64_const import ARM64_OP_IMM, ARM64_OP_REG
from .kernel_asm import NOP
class KernelPatchApfsSnapshotMixin:
    """Mixin: disables the sealed-volume gate on root-snapshot mounting (patch 1)."""

    def patch_apfs_root_snapshot(self):
        """Patch 1: NOP the tbnz w8,#5 that gates sealed-volume root snapshot panic."""
        self._log("\n[1] _apfs_vfsop_mount: root snapshot sealed volume check")
        # Primary anchor is the "Rooting from snapshot" log string; fall back
        # to the failure-panic string when the log line is absent.
        xrefs = self._find_by_string_in_range(
            b"Rooting from snapshot with xid", self.apfs_text, "apfs_vfsop_mount log"
        ) or self._find_by_string_in_range(
            b"Failed to find the root snapshot",
            self.apfs_text,
            "root snapshot panic",
        )
        if not xrefs:
            return False
        for _, add_off, _ in xrefs:
            window_end = min(add_off + 0x200, self.size)
            for cursor in range(add_off, window_end, 4):
                decoded = self._disas_at(cursor)
                if not decoded:
                    continue
                insn = decoded[0]
                if insn.mnemonic not in ("tbnz", "tbz"):
                    continue
                # Match tbz/tbnz <reg>, #5, ... — bit 5 gates the check.
                ops = insn.operands
                if len(ops) < 2:
                    continue
                if ops[0].type != ARM64_OP_REG or ops[1].type != ARM64_OP_IMM:
                    continue
                if ops[1].imm != 5:
                    continue
                self.emit(
                    cursor,
                    NOP,
                    f"NOP {insn.mnemonic} {insn.op_str} "
                    "(sealed vol check) [_apfs_vfsop_mount]",
                )
                return True
        self._log(" [-] tbz/tbnz w8,#5 not found near xref")
        return False

View File

@@ -0,0 +1,46 @@
"""Mixin: bsd_init rootvp patch."""
from .kernel_asm import MOV_X0_0, NOP
class KernelPatchBsdInitMixin:
    """Mixin: bypasses the 'rootvp not authenticated' boot panic (patch 3)."""

    def patch_bsd_init_rootvp(self):
        """Patch 3: NOP the conditional branch guarding the 'rootvp not authenticated' panic."""
        self._log("\n[3] _bsd_init: rootvp not authenticated panic")
        anchor = self.find_string(b"rootvp not authenticated after mounting")
        if anchor < 0:
            self._log(" [-] string not found")
            return False
        xrefs = self.find_string_refs(anchor, *self.kern_text)
        if not xrefs:
            self._log(" [-] no code refs in kernel __text")
            return False
        for adrp_off, add_off, _ in xrefs:
            # Locate the BL _panic that consumes this string reference.
            panic_bl = next(
                (
                    off
                    for off in range(add_off, min(add_off + 0x40, self.size), 4)
                    if self._is_bl(off) == self.panic_off
                ),
                -1,
            )
            if panic_bl < 0:
                continue
            # Search backwards for a conditional branch whose target lands in
            # the error block ending with BL _panic; that block is typically
            # only a few instructions long, so a generous window is used.
            lo, hi = panic_bl - 0x40, panic_bl + 4
            floor = max(adrp_off - 0x400, 0)
            back = adrp_off - 4
            while back > floor:
                target, kind = self._decode_branch_target(back)
                if target is not None and lo <= target <= hi:
                    self.emit(back, NOP, f"NOP {kind} (rootvp auth) [_bsd_init]")
                    return True
                back -= 4
        self._log(" [-] conditional branch into panic path not found")
        return False

View File

@@ -0,0 +1,95 @@
"""Mixin: debugger enablement patch."""
from capstone.arm64_const import ARM64_OP_REG, ARM64_REG_X8
from .kernel_asm import MOV_X0_1, RET, _rd32, _rd64
class KernelPatchDebuggerMixin:
    # NOTE(review): relies on helpers/attributes from the concrete patcher
    # class (find_string, all_segments, raw, base_va, kern_text, emit, _log,
    # _get_kernel_text_range, _disas_at, _is_func_boundary, bl_callers).

    def patch_PE_i_can_has_debugger(self):
        """Patches 6-7: mov x0,#1; ret at _PE_i_can_has_debugger.

        Two independent strategies are tried in order; returns True as soon
        as one of them locates and stubs the function, else False.
        """
        self._log("\n[6-7] _PE_i_can_has_debugger: stub with mov x0,#1; ret")
        # Strategy 1: find symbol name in __LINKEDIT and parse nearby VA
        str_off = self.find_string(b"\x00_PE_i_can_has_debugger\x00")
        if str_off < 0:
            str_off = self.find_string(b"PE_i_can_has_debugger")
        if str_off >= 0:
            linkedit = None
            for name, vmaddr, fileoff, filesize, _ in self.all_segments:
                if name == "__LINKEDIT":
                    linkedit = (fileoff, fileoff + filesize)
            # Only trust the hit when the name lives inside __LINKEDIT,
            # i.e. it is symbol-table data rather than an unrelated string.
            if linkedit and linkedit[0] <= str_off < linkedit[1]:
                name_end = self.raw.find(b"\x00", str_off + 1)
                if name_end > 0:
                    # NOTE(review): probes every byte offset (not just aligned
                    # ones) within 32 bytes after the name for a 64-bit value
                    # that, minus base_va, maps into kernel __text — confirm
                    # this matches the symbol-record layout of the target image.
                    for probe in range(name_end + 1, min(name_end + 32, self.size - 7)):
                        val = _rd64(self.raw, probe)
                        func_foff = val - self.base_va
                        if self.kern_text[0] <= func_foff < self.kern_text[1]:
                            first_insn = _rd32(self.raw, func_foff)
                            # Reject zero padding and a bare NOP (0xD503201F):
                            # neither is a plausible function entry.
                            if first_insn != 0 and first_insn != 0xD503201F:
                                self.emit(
                                    func_foff,
                                    MOV_X0_1,
                                    "mov x0,#1 [_PE_i_can_has_debugger]",
                                )
                                self.emit(
                                    func_foff + 4, RET, "ret [_PE_i_can_has_debugger]"
                                )
                                return True
        # Strategy 2: code pattern — function starts with ADRP x8,
        # preceded by a function boundary, has many BL callers,
        # and reads a 32-bit (w-register) value within first few instructions.
        self._log(" [*] trying code pattern search...")
        # Determine kernel-only __text range from fileset entries if available
        kern_text_start, kern_text_end = self._get_kernel_text_range()
        best_off = -1
        best_callers = 0
        for off in range(kern_text_start, kern_text_end - 12, 4):
            dis = self._disas_at(off)
            if not dis or dis[0].mnemonic != "adrp":
                continue
            # Must target x8
            if dis[0].operands[0].reg != ARM64_REG_X8:
                continue
            # Must be preceded by function boundary
            if off >= 4:
                prev = _rd32(self.raw, off - 4)
                if not self._is_func_boundary(prev):
                    continue
            # Must read a w-register (32-bit) from [x8, #imm] within first 6 instructions
            has_w_load = False
            for k in range(1, 7):
                if off + k * 4 >= self.size:
                    break
                dk = self._disas_at(off + k * 4)
                if (
                    dk
                    and dk[0].mnemonic == "ldr"
                    and dk[0].op_str.startswith("w")
                    and "x8" in dk[0].op_str
                ):
                    has_w_load = True
                    break
            if not has_w_load:
                continue
            # Count callers — _PE_i_can_has_debugger has ~80-200 callers
            # (widely used but not a basic kernel primitive)
            n_callers = len(self.bl_callers.get(off, []))
            # Keep the candidate with the most callers inside the band.
            if 50 <= n_callers <= 250 and n_callers > best_callers:
                best_callers = n_callers
                best_off = off
        if best_off >= 0:
            self._log(
                f" [+] code pattern match at 0x{best_off:X} ({best_callers} callers)"
            )
            self.emit(best_off, MOV_X0_1, "mov x0,#1 [_PE_i_can_has_debugger]")
            self.emit(best_off + 4, RET, "ret [_PE_i_can_has_debugger]")
            return True
        self._log(" [-] function not found")
        return False

View File

@@ -0,0 +1,62 @@
"""Mixin: dyld policy patch."""
from .kernel_asm import MOV_W0_1
class KernelPatchDyldPolicyMixin:
    # NOTE(review): relies on helpers from the concrete patcher class
    # (find_string, find_string_refs, _is_bl, _is_cond_branch_w0, emit, _log,
    # amfi_text).

    def patch_check_dyld_policy(self):
        """Patches 10-11: Replace two BL calls in _check_dyld_policy_internal with mov w0,#1.
        The function is found via its reference to the Swift Playgrounds
        entitlement string. The two BLs immediately preceding that string
        reference (each followed by a conditional branch on w0) are patched.

        Returns True if both BLs were rewritten, False otherwise.
        """
        self._log("\n[10-11] _check_dyld_policy_internal: mov w0,#1 (two BLs)")
        # Anchor: entitlement string referenced from within the function
        str_off = self.find_string(
            b"com.apple.developer.swift-playgrounds-app.development-build"
        )
        if str_off < 0:
            self._log(" [-] swift-playgrounds entitlement string not found")
            return False
        refs = self.find_string_refs(str_off, *self.amfi_text)
        if not refs:
            # Fall back to an unranged (whole-image) search when the
            # AMFI-ranged lookup comes up empty.
            refs = self.find_string_refs(str_off)
        if not refs:
            self._log(" [-] no code refs in AMFI")
            return False
        for adrp_off, add_off, _ in refs:
            # Walk backward from the ADRP, looking for BL + conditional-on-w0 pairs
            bls_with_cond = []  # [(bl_off, bl_target), ...]
            for back in range(adrp_off - 4, max(adrp_off - 80, 0), -4):
                bl_target = self._is_bl(back)
                if bl_target < 0:
                    continue
                if self._is_cond_branch_w0(back + 4):
                    bls_with_cond.append((back, bl_target))
            if len(bls_with_cond) >= 2:
                # The backward walk appends nearest-first, so index 0 is the
                # BL closest to the ADRP and index 1 the one before it.
                bl2_off, bl2_tgt = bls_with_cond[0]  # closer to ADRP
                bl1_off, bl1_tgt = bls_with_cond[1]  # farther from ADRP
                # The two BLs must call DIFFERENT functions — this
                # distinguishes _check_dyld_policy_internal from other
                # functions that repeat calls to the same helper.
                if bl1_tgt == bl2_tgt:
                    continue
                self.emit(
                    bl1_off,
                    MOV_W0_1,
                    "mov w0,#1 (was BL) [_check_dyld_policy_internal @1]",
                )
                self.emit(
                    bl2_off,
                    MOV_W0_1,
                    "mov w0,#1 (was BL) [_check_dyld_policy_internal @2]",
                )
                return True
        self._log(" [-] _check_dyld_policy_internal BL pair not found")
        return False

View File

@@ -0,0 +1,38 @@
"""Mixin: launch constraints patch."""
from .kernel_asm import MOV_W0_0, RET
class KernelPatchLaunchConstraintsMixin:
    """Mixin: stubs out AMFI launch-constraint enforcement (patches 4-5)."""

    def patch_proc_check_launch_constraints(self):
        """Patches 4-5: mov w0,#0; ret at _proc_check_launch_constraints start.
        The AMFI function does NOT reference the symbol name string
        '_proc_check_launch_constraints' — only the kernel wrapper does.
        Instead, use 'AMFI: Validation Category info' which IS referenced
        from the actual AMFI function.
        """
        self._log("\n[4-5] _proc_check_launch_constraints: stub with mov w0,#0; ret")
        anchor = self.find_string(b"AMFI: Validation Category info")
        if anchor < 0:
            self._log(" [-] 'AMFI: Validation Category info' string not found")
            return False
        xrefs = self.find_string_refs(anchor, *self.amfi_text)
        if not xrefs:
            self._log(" [-] no code refs in AMFI")
            return False
        # Stub the first referencing function whose start can be resolved.
        entry = next(
            (
                start
                for start in (self.find_function_start(ref[0]) for ref in xrefs)
                if start >= 0
            ),
            -1,
        )
        if entry < 0:
            self._log(" [-] function start not found")
            return False
        self.emit(entry, MOV_W0_0, "mov w0,#0 [_proc_check_launch_constraints]")
        self.emit(entry + 4, RET, "ret [_proc_check_launch_constraints]")
        return True

View File

@@ -0,0 +1,122 @@
"""Mixin: post-validation patches."""
from capstone.arm64_const import ARM64_OP_IMM, ARM64_OP_REG, ARM64_REG_W0
from .kernel_asm import CMP_W0_W0, NOP, _PACIBSP_U32, _rd32
class KernelPatchPostValidationMixin:
    # NOTE(review): relies on helpers from the concrete patcher class
    # (find_string, find_string_refs, find_function_start, _disas_at,
    # _is_bl, emit, _log, raw, size, kern_text, amfi_text).

    def patch_post_validation_nop(self):
        """Patch 8: NOP the TBNZ after TXM CodeSignature error logging.
        The 'TXM [Error]: CodeSignature: selector: ...' string is followed
        by a BL (printf/log), then a TBNZ that branches to an additional
        validation path. NOP the TBNZ to skip it.

        Returns True if a TBNZ was NOPed, False otherwise.
        """
        self._log("\n[8] post-validation NOP (txm-related)")
        str_off = self.find_string(b"TXM [Error]: CodeSignature")
        if str_off < 0:
            self._log(" [-] 'TXM [Error]: CodeSignature' string not found")
            return False
        refs = self.find_string_refs(str_off, *self.kern_text)
        if not refs:
            # Fall back to an unranged (whole-image) search.
            refs = self.find_string_refs(str_off)
        if not refs:
            self._log(" [-] no code refs")
            return False
        for adrp_off, add_off, _ in refs:
            # Scan forward past the BL (printf/log) for a TBNZ
            for scan in range(add_off, min(add_off + 0x40, self.size), 4):
                insns = self._disas_at(scan)
                if not insns:
                    continue
                # First TBNZ within the window is taken, whatever register
                # it tests.
                if insns[0].mnemonic == "tbnz":
                    self.emit(
                        scan,
                        NOP,
                        f"NOP {insns[0].mnemonic} {insns[0].op_str} "
                        "[txm post-validation]",
                    )
                    return True
        self._log(" [-] TBNZ not found after TXM error string ref")
        return False

    def patch_post_validation_cmp(self):
        """Patch 9: cmp w0,w0 in postValidation (AMFI code signing).
        The 'AMFI: code signature validation failed' string is in the CALLER
        function, not in postValidation itself. We find the caller, collect
        its BL targets, then look inside each target for CMP W0, #imm + B.NE.

        Returns True if the CMP was rewritten, False otherwise.
        """
        self._log("\n[9] postValidation: cmp w0,w0 (AMFI code signing)")
        str_off = self.find_string(b"AMFI: code signature validation failed")
        if str_off < 0:
            self._log(" [-] string not found")
            return False
        refs = self.find_string_refs(str_off, *self.amfi_text)
        if not refs:
            # Fall back to an unranged (whole-image) search.
            refs = self.find_string_refs(str_off)
        if not refs:
            self._log(" [-] no code refs")
            return False
        caller_start = self.find_function_start(refs[0][0])
        if caller_start < 0:
            self._log(" [-] caller function start not found")
            return False
        # Collect unique BL targets from the caller function
        # Only stop at PACIBSP (new function), not at ret/retab (early returns)
        bl_targets = set()
        for scan in range(caller_start, min(caller_start + 0x2000, self.size), 4):
            if scan > caller_start + 8 and _rd32(self.raw, scan) == _PACIBSP_U32:
                break
            target = self._is_bl(scan)
            if target >= 0:
                bl_targets.add(target)
        # In each BL target in AMFI, look for: BL ... ; CMP W0, #imm ; B.NE
        # The CMP must check W0 (return value of preceding BL call).
        for target in sorted(bl_targets):
            if not (self.amfi_text[0] <= target < self.amfi_text[1]):
                continue
            for off in range(target, min(target + 0x200, self.size), 4):
                # Stop when we run into the next function's PACIBSP.
                if off > target + 8 and _rd32(self.raw, off) == _PACIBSP_U32:
                    break
                dis = self._disas_at(off, 2)
                if len(dis) < 2:
                    continue
                i0, i1 = dis[0], dis[1]
                if i0.mnemonic != "cmp" or i1.mnemonic != "b.ne":
                    continue
                # Must be CMP W0, #imm (first operand = w0, second = immediate)
                ops = i0.operands
                if len(ops) < 2:
                    continue
                if ops[0].type != ARM64_OP_REG or ops[0].reg != ARM64_REG_W0:
                    continue
                if ops[1].type != ARM64_OP_IMM:
                    continue
                # Must be preceded by a BL within 2 instructions
                has_bl = False
                for gap in (4, 8):
                    if self._is_bl(off - gap) >= 0:
                        has_bl = True
                        break
                if not has_bl:
                    continue
                # Forcing the comparison to compare w0 against itself makes
                # the B.NE fall through (success path).
                self.emit(
                    off,
                    CMP_W0_W0,
                    f"cmp w0,w0 (was {i0.mnemonic} {i0.op_str}) [postValidation]",
                )
                return True
        self._log(" [-] CMP+B.NE pattern not found in caller's BL targets")
        return False

View File

@@ -0,0 +1,46 @@
"""Mixin: sandbox hook patches."""
from .kernel_asm import MOV_X0_0, RET
class KernelPatchSandboxMixin:
    """Mixin: stubs selected Sandbox MACF policy hooks (patches 16-25)."""

    # mac_policy_ops slot indices, taken from XNU source (xnu-11215+).
    _SANDBOX_HOOKS = (
        ("file_check_mmap", 36),
        ("mount_check_mount", 87),
        ("mount_check_remount", 88),
        ("mount_check_umount", 91),
        ("vnode_check_rename", 120),
    )

    def patch_sandbox_hooks(self):
        """Patches 16-25: Stub Sandbox MACF hooks with mov x0,#0; ret.
        Uses mac_policy_ops struct indices from XNU source (xnu-11215+).
        """
        self._log("\n[16-25] Sandbox MACF hooks")
        ops_table = self._find_sandbox_ops_table_via_conf()
        if ops_table is None:
            return False
        sb_start, sb_end = self.sandbox_text
        patched = 0
        for hook_name, idx in self._SANDBOX_HOOKS:
            func_off = self._read_ops_entry(ops_table, idx)
            if func_off is None or func_off <= 0:
                self._log(f" [-] ops[{idx}] {hook_name}: NULL or invalid")
                continue
            # The hook pointer must land inside Sandbox __text to be patched.
            if not (sb_start <= func_off < sb_end):
                self._log(
                    f" [-] ops[{idx}] {hook_name}: foff 0x{func_off:X} "
                    f"outside Sandbox (0x{sb_start:X}-0x{sb_end:X})"
                )
                continue
            self.emit(func_off, MOV_X0_0, f"mov x0,#0 [_hook_{hook_name}]")
            self.emit(func_off + 4, RET, f"ret [_hook_{hook_name}]")
            self._log(f" [+] ops[{idx}] {hook_name} at foff 0x{func_off:X}")
            patched += 1
        return patched > 0

View File

@@ -2,26 +2,18 @@
"""
txm_jb.py — Jailbreak extension patcher for TXM images.
All patch sites are found dynamically via string xrefs + instruction pattern
matching. No fixed byte offsets.
Reuses shared TXM logic from txm_dev.py and adds the selector24 CodeSignature
hash-extraction bypass used only by the JB variant.
"""
from keystone import Ks, KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN as KS_MODE_LE
from .txm import TXMPatcher, MOV_X0_0, _asm, _disasm_one
from .txm_dev import TXMPatcher as TXMDevPatcher, _asm, _disasm_one
_ks = Ks(KS_ARCH_ARM64, KS_MODE_LE)
NOP = _asm("nop")
MOV_X0_1 = _asm("mov x0, #1")
MOV_W0_1 = _asm("mov w0, #1")
MOV_X0_X20 = _asm("mov x0, x20")
STRB_W0_X20_30 = _asm("strb w0, [x20, #0x30]")
PACIBSP = _asm("hint #27")
class TXMJBPatcher(TXMPatcher):
"""JB-only TXM patcher."""
class TXMJBPatcher(TXMDevPatcher):
"""JB TXM patcher: dev TXM patches + selector24 extension."""
def apply(self):
self.find_all()
@@ -40,149 +32,8 @@ class TXMJBPatcher(TXMPatcher):
self.patch_developer_mode_bypass()
return self.patches
# ── helpers ──────────────────────────────────────────────────
def _asm_at(self, asm_line, addr):
enc, _ = _ks.asm(asm_line, addr=addr)
if not enc:
raise RuntimeError(f"asm failed at 0x{addr:X}: {asm_line}")
return bytes(enc)
def _find_func_start(self, off, back=0x1000):
start = max(0, off - back)
for scan in range(off & ~3, start - 1, -4):
if self.raw[scan : scan + 4] == PACIBSP:
return scan
return None
def _find_func_end(self, func_start, forward=0x1200):
end = min(self.size, func_start + forward)
for scan in range(func_start + 4, end, 4):
if self.raw[scan : scan + 4] == PACIBSP:
return scan
return end
def _find_refs_to_offset(self, target_off):
refs = []
for off in range(0, self.size - 8, 4):
a = _disasm_one(self.raw, off)
b = _disasm_one(self.raw, off + 4)
if not a or not b:
continue
if a.mnemonic != "adrp" or b.mnemonic != "add":
continue
if len(a.operands) < 2 or len(b.operands) < 3:
continue
if a.operands[0].reg != b.operands[1].reg:
continue
if a.operands[1].imm + b.operands[2].imm == target_off:
refs.append((off, off + 4))
return refs
def _find_string_refs(self, needle):
if isinstance(needle, str):
needle = needle.encode()
refs = []
seen = set()
off = 0
while True:
s_off = self.raw.find(needle, off)
if s_off < 0:
break
off = s_off + 1
for r in self._find_refs_to_offset(s_off):
if r[0] not in seen:
seen.add(r[0])
refs.append((s_off, r[0], r[1]))
return refs
def _ref_in_function(self, refs, func_start):
out = []
for s_off, adrp_off, add_off in refs:
fs = self._find_func_start(adrp_off)
if fs == func_start:
out.append((s_off, adrp_off, add_off))
return out
def _find_debugger_gate_func_start(self):
refs = self._find_string_refs(b"com.apple.private.cs.debugger")
starts = set()
for _, _, add_off in refs:
for scan in range(add_off, min(add_off + 0x20, self.size - 8), 4):
i = _disasm_one(self.raw, scan)
n = _disasm_one(self.raw, scan + 4)
p1 = _disasm_one(self.raw, scan - 4) if scan >= 4 else None
p2 = _disasm_one(self.raw, scan - 8) if scan >= 8 else None
if not all((i, n, p1, p2)):
continue
if not (
i.mnemonic == "bl"
and n.mnemonic == "tbnz"
and n.op_str.startswith("w0, #0,")
and p1.mnemonic == "mov"
and p1.op_str == "x2, #0"
and p2.mnemonic == "mov"
and p2.op_str == "x0, #0"
):
continue
fs = self._find_func_start(scan)
if fs is not None:
starts.add(fs)
if len(starts) != 1:
return None
return next(iter(starts))
def _find_udf_cave(self, min_insns=6, near_off=None, max_distance=0x80000):
need = min_insns * 4
start = 0 if near_off is None else max(0, near_off - 0x1000)
end = self.size if near_off is None else min(self.size, near_off + max_distance)
best = None
best_dist = None
off = start
while off < end:
run = off
while run < end and self.raw[run : run + 4] == b"\x00\x00\x00\x00":
run += 4
if run - off >= need:
prev = _disasm_one(self.raw, off - 4) if off >= 4 else None
if prev and prev.mnemonic in (
"b",
"b.eq",
"b.ne",
"b.lo",
"b.hs",
"cbz",
"cbnz",
"tbz",
"tbnz",
):
# Leave 2-word safety gap after the preceding branch
padded = off + 8
if padded + need <= run:
return padded
return off
if near_off is not None and _disasm_one(self.raw, off):
dist = abs(off - near_off)
if best is None or dist < best_dist:
best = off
best_dist = dist
off = run + 4 if run > off else off + 4
return best
# ── JB patches ───────────────────────────────────────────────
def patch_selector24_hash_extraction_nop(self):
"""NOP the hash flags extraction BL and its LDR X1 arg setup.
The CS hash validator function has a distinctive dual-BL pattern:
LDR X0, [Xn, #0x30] ; blob data
LDR X1, [Xn, #0x38] ; blob size <-- NOP
ADD X2, SP, #... ; output ptr
BL hash_flags_extract ; <-- NOP
LDP X0, X1, [Xn, #0x30] ; reload for 2nd call
ADD X2, SP, #...
BL hash_data_lookup ; (keep)
Found via 'mov w0, #0xa1' anchor unique to this function.
"""
"""NOP hash-flags extraction setup/call in selector24 path."""
for off in range(0, self.size - 4, 4):
ins = _disasm_one(self.raw, off)
if not (ins and ins.mnemonic == "mov" and ins.op_str == "w0, #0xa1"):
@@ -219,163 +70,3 @@ class TXMJBPatcher(TXMPatcher):
self._log(" [-] TXM JB: selector24 hash extraction site not found")
return False
def patch_get_task_allow_force_true(self):
"""Force get-task-allow entitlement call to return true."""
refs = self._find_string_refs(b"get-task-allow")
if not refs:
self._log(" [-] TXM JB: get-task-allow string refs not found")
return False
cands = []
for _, _, add_off in refs:
for scan in range(add_off, min(add_off + 0x20, self.size - 4), 4):
i = _disasm_one(self.raw, scan)
n = _disasm_one(self.raw, scan + 4)
if not i or not n:
continue
if (
i.mnemonic == "bl"
and n.mnemonic == "tbnz"
and n.op_str.startswith("w0, #0,")
):
cands.append(scan)
if len(cands) != 1:
self._log(
f" [-] TXM JB: expected 1 get-task-allow BL site, found {len(cands)}"
)
return False
self.emit(cands[0], MOV_X0_1, "get-task-allow: bl -> mov x0,#1")
return True
def patch_selector42_29_shellcode(self):
"""Selector 42|29 patch via dynamic cave shellcode + branch redirect."""
fn = self._find_debugger_gate_func_start()
if fn is None:
self._log(" [-] TXM JB: debugger-gate function not found (selector42|29)")
return False
stubs = []
for off in range(4, self.size - 24, 4):
p = _disasm_one(self.raw, off - 4)
i0 = _disasm_one(self.raw, off)
i1 = _disasm_one(self.raw, off + 4)
i2 = _disasm_one(self.raw, off + 8)
i3 = _disasm_one(self.raw, off + 12)
i4 = _disasm_one(self.raw, off + 16)
i5 = _disasm_one(self.raw, off + 20)
if not all((p, i0, i1, i2, i3, i4, i5)):
continue
if not (p.mnemonic == "bti" and p.op_str == "j"):
continue
if not (i0.mnemonic == "mov" and i0.op_str == "x0, x20"):
continue
if not (
i1.mnemonic == "bl" and i2.mnemonic == "mov" and i2.op_str == "x1, x21"
):
continue
if not (
i3.mnemonic == "mov"
and i3.op_str == "x2, x22"
and i4.mnemonic == "bl"
and i5.mnemonic == "b"
):
continue
if i4.operands and i4.operands[0].imm == fn:
stubs.append(off)
if len(stubs) != 1:
self._log(
f" [-] TXM JB: selector42|29 stub expected 1, found {len(stubs)}"
)
return False
stub_off = stubs[0]
cave = self._find_udf_cave(min_insns=6, near_off=stub_off)
if cave is None:
self._log(" [-] TXM JB: no UDF cave found for selector42|29 shellcode")
return False
self.emit(
stub_off,
self._asm_at(f"b #0x{cave:X}", stub_off),
"selector42|29: branch to shellcode",
)
self.emit(cave, NOP, "selector42|29 shellcode pad: udf -> nop")
self.emit(cave + 4, MOV_X0_1, "selector42|29 shellcode: mov x0,#1")
self.emit(
cave + 8, STRB_W0_X20_30, "selector42|29 shellcode: strb w0,[x20,#0x30]"
)
self.emit(cave + 12, MOV_X0_X20, "selector42|29 shellcode: mov x0,x20")
self.emit(
cave + 16,
self._asm_at(f"b #0x{stub_off + 4:X}", cave + 16),
"selector42|29 shellcode: branch back",
)
return True
def patch_debugger_entitlement_force_true(self):
"""Force debugger entitlement call to return true."""
refs = self._find_string_refs(b"com.apple.private.cs.debugger")
if not refs:
self._log(" [-] TXM JB: debugger refs not found")
return False
cands = []
for _, _, add_off in refs:
for scan in range(add_off, min(add_off + 0x20, self.size - 4), 4):
i = _disasm_one(self.raw, scan)
n = _disasm_one(self.raw, scan + 4)
p1 = _disasm_one(self.raw, scan - 4) if scan >= 4 else None
p2 = _disasm_one(self.raw, scan - 8) if scan >= 8 else None
if not all((i, n, p1, p2)):
continue
if (
i.mnemonic == "bl"
and n.mnemonic == "tbnz"
and n.op_str.startswith("w0, #0,")
and p1.mnemonic == "mov"
and p1.op_str == "x2, #0"
and p2.mnemonic == "mov"
and p2.op_str == "x0, #0"
):
cands.append(scan)
if len(cands) != 1:
self._log(f" [-] TXM JB: expected 1 debugger BL site, found {len(cands)}")
return False
self.emit(cands[0], MOV_W0_1, "debugger entitlement: bl -> mov w0,#1")
return True
def patch_developer_mode_bypass(self):
"""Developer-mode bypass: NOP conditional guard before deny log path."""
refs = self._find_string_refs(
b"developer mode enabled due to system policy configuration"
)
if not refs:
self._log(" [-] TXM JB: developer-mode string ref not found")
return False
cands = []
for _, _, add_off in refs:
for back in range(add_off - 4, max(add_off - 0x20, 0), -4):
ins = _disasm_one(self.raw, back)
if not ins:
continue
if ins.mnemonic not in ("tbz", "tbnz", "cbz", "cbnz"):
continue
if not ins.op_str.startswith("w9, #0,"):
continue
cands.append(back)
if len(cands) != 1:
self._log(
f" [-] TXM JB: expected 1 developer mode guard, found {len(cands)}"
)
return False
self.emit(cands[0], NOP, "developer mode bypass")
return True

View File

@@ -10,7 +10,7 @@ set -euo pipefail
IRECOVERY="${IRECOVERY:-irecovery}"
RAMDISK_DIR="${1:-Ramdisk}"
if [ ! -d "$RAMDISK_DIR" ]; then
if [[ ! -d "$RAMDISK_DIR" ]]; then
echo "[-] Ramdisk directory not found: $RAMDISK_DIR"
echo " Run 'make ramdisk_build' first."
exit 1

View File

@@ -19,8 +19,8 @@ SDKROOT="$(xcrun --sdk macosx --show-sdk-path)"
OPENSSL_PREFIX="$(brew --prefix openssl@3 2>/dev/null || true)"
[[ -d "$OPENSSL_PREFIX" ]] || {
echo "[-] openssl@3 not found. Run: brew install openssl@3" >&2
exit 1
echo "[-] openssl@3 not found. Run: brew install openssl@3" >&2
exit 1
}
export PKG_CONFIG_PATH="$PREFIX/lib/pkgconfig:$OPENSSL_PREFIX/lib/pkgconfig"
@@ -33,42 +33,42 @@ mkdir -p "$SRC" "$LOG"
# ── Helpers ──────────────────────────────────────────────────────
die() {
echo "[-] $*" >&2
exit 1
echo "[-] $*" >&2
exit 1
}
check_tools() {
local missing=()
for cmd in autoconf automake pkg-config cmake git; do
command -v "$cmd" &>/dev/null || missing+=("$cmd")
done
command -v glibtoolize &>/dev/null || command -v libtoolize &>/dev/null ||
missing+=("libtool(ize)")
((${#missing[@]} == 0)) || die "Missing: ${missing[*]} — brew install ${missing[*]}"
local missing=()
for cmd in autoconf automake pkg-config cmake git; do
command -v "$cmd" &>/dev/null || missing+=("$cmd")
done
command -v glibtoolize &>/dev/null || command -v libtoolize &>/dev/null ||
missing+=("libtool(ize)")
((${#missing[@]} == 0)) || die "Missing: ${missing[*]} — brew install ${missing[*]}"
}
clone() {
local url=$1 dir=$2
if [[ -d "$dir/.git" ]]; then
git -C "$dir" fetch --depth 1 origin --quiet
git -C "$dir" reset --hard FETCH_HEAD --quiet
git -C "$dir" clean -fdx --quiet
else
git clone --depth 1 "$url" "$dir" --quiet
fi
local url="$1" dir="$2"
if [[ -d "$dir/.git" ]]; then
git -C "$dir" fetch --depth 1 origin --quiet
git -C "$dir" reset --hard FETCH_HEAD --quiet
git -C "$dir" clean -fdx --quiet
else
git clone --depth 1 "$url" "$dir" --quiet
fi
}
build_lib() {
local name=$1
shift
echo " $name"
cd "$SRC/$name"
./autogen.sh --prefix="$PREFIX" \
--enable-shared=no --enable-static=yes \
"$@" >"$LOG/$name-configure.log" 2>&1
make -j"$NPROC" >"$LOG/$name-build.log" 2>&1
make install >"$LOG/$name-install.log" 2>&1
cd "$SRC"
local name="$1"
shift
echo " $name"
cd "$SRC/$name"
./autogen.sh --prefix="$PREFIX" \
--enable-shared=no --enable-static=yes \
"$@" >"$LOG/$name-configure.log" 2>&1
make -j"$NPROC" >"$LOG/$name-build.log" 2>&1
make install >"$LOG/$name-install.log" 2>&1
cd "$SRC"
}
# ── Preflight ────────────────────────────────────────────────────
@@ -81,11 +81,11 @@ echo ""
echo "[1/3] Core libraries (using homebrew openssl@3)"
for lib in libplist libimobiledevice-glue libusbmuxd libtatsu libimobiledevice; do
clone "https://github.com/libimobiledevice/$lib" "$SRC/$lib"
case "$lib" in
libplist | libimobiledevice) build_lib "$lib" --without-cython ;;
*) build_lib "$lib" ;;
esac
clone "https://github.com/libimobiledevice/$lib" "$SRC/$lib"
case "$lib" in
libplist | libimobiledevice) build_lib "$lib" --without-cython ;;
*) build_lib "$lib" ;;
esac
done
# ── 2. libirecovery (+ PCC research VM patch) ───────────────────
@@ -95,10 +95,10 @@ clone "https://github.com/libimobiledevice/libirecovery" "$SRC/libirecovery"
# PR #150: register iPhone99,11 / vresearch101ap for PCC research VMs
if ! grep -q 'vresearch101ap' "$SRC/libirecovery/src/libirecovery.c"; then
cd "$SRC/libirecovery"
git apply "$SCRIPT_DIR/patches/libirecovery-pcc-vm.patch" ||
die "Failed to apply libirecovery PCC patch — check context"
cd "$SRC"
cd "$SRC/libirecovery"
git apply "$SCRIPT_DIR/patches/libirecovery-pcc-vm.patch" ||
die "Failed to apply libirecovery PCC patch — check context"
cd "$SRC"
fi
build_lib libirecovery
# libzip: built with CMake (not autotools), static only, with every optional
# compression/TLS backend disabled. Skipped entirely when the installed
# pkg-config file already exists.
LIBZIP_VER="1.11.4"
if [[ ! -f "$PREFIX/lib/pkgconfig/libzip.pc" ]]; then
    echo "  libzip"
    # Download and extract the release tarball once; reuse it on re-runs.
    [[ -d "$SRC/libzip-$LIBZIP_VER" ]] ||
        curl -LfsS "https://github.com/nih-at/libzip/releases/download/v$LIBZIP_VER/libzip-$LIBZIP_VER.tar.gz" |
        tar xz -C "$SRC"
    cmake -S "$SRC/libzip-$LIBZIP_VER" -B "$SRC/libzip-$LIBZIP_VER/build" \
        -DCMAKE_INSTALL_PREFIX="$PREFIX" -DCMAKE_OSX_SYSROOT="$SDKROOT" \
        -DBUILD_SHARED_LIBS=OFF -DBUILD_DOC=OFF -DBUILD_EXAMPLES=OFF \
        -DBUILD_REGRESS=OFF -DBUILD_TOOLS=OFF \
        -DENABLE_BZIP2=OFF -DENABLE_LZMA=OFF -DENABLE_ZSTD=OFF \
        -DENABLE_GNUTLS=OFF -DENABLE_MBEDTLS=OFF -DENABLE_OPENSSL=OFF \
        >"$LOG/libzip-cmake.log" 2>&1
    cmake --build "$SRC/libzip-$LIBZIP_VER/build" -j"$NPROC" \
        >"$LOG/libzip-build.log" 2>&1
    cmake --install "$SRC/libzip-$LIBZIP_VER/build" \
        >"$LOG/libzip-install.log" 2>&1
fi
# ── 3. idevicerestore ───────────────────────────────────────────

echo "[3/3] idevicerestore"
clone "https://github.com/libimobiledevice/idevicerestore" "$SRC/idevicerestore"
# The macOS SDK ships libcurl/zlib without pkg-config files, so the
# *_CFLAGS/*_LIBS/*_VERSION cache variables are supplied by hand to point
# configure at the SDK copies.
build_lib idevicerestore \
    libcurl_CFLAGS="-I$SDKROOT/usr/include" \
    libcurl_LIBS="-lcurl" \
    libcurl_VERSION="$(/usr/bin/curl-config --version | cut -d' ' -f2)" \
    zlib_CFLAGS="-I$SDKROOT/usr/include" \
    zlib_LIBS="-lz" \
    zlib_VERSION="1.2"

# ── Done ─────────────────────────────────────────────────────────

View File

# Host dependencies installed via Homebrew.
BREW_PACKAGES=(gnu-tar openssl@3 ldid-procursus sshpass)

# Collect only the packages that are not yet installed, then install them
# in a single `brew install` invocation.
BREW_MISSING=()
for pkg in "${BREW_PACKAGES[@]}"; do
    if ! brew list "$pkg" &>/dev/null; then
        BREW_MISSING+=("$pkg")
    fi
done

if ((${#BREW_MISSING[@]} > 0)); then
    echo "  Installing: ${BREW_MISSING[*]}"
    brew install "${BREW_MISSING[@]}"
else
    echo "  All brew packages installed"
fi
# ── Trustcache ─────────────────────────────────────────────────

echo "[2/4] trustcache"
TRUSTCACHE_BIN="$TOOLS_PREFIX/bin/trustcache"
if [[ -x "$TRUSTCACHE_BIN" ]]; then
    echo "  Already built: $TRUSTCACHE_BIN"
else
    echo "  Building from source (CRKatri/trustcache)..."
    BUILD_DIR=$(mktemp -d)
    # Remove the scratch checkout when the script exits.
    trap "rm -rf '$BUILD_DIR'" EXIT
    git clone --depth 1 https://github.com/CRKatri/trustcache.git "$BUILD_DIR/trustcache" --quiet
    OPENSSL_PREFIX="$(brew --prefix openssl@3)"
    # OPENSSL=1/-DOPENSSL select the OpenSSL backend; -w silences warnings.
    make -C "$BUILD_DIR/trustcache" \
        OPENSSL=1 \
        CFLAGS="-I$OPENSSL_PREFIX/include -DOPENSSL -w" \
        LDFLAGS="-L$OPENSSL_PREFIX/lib" \
        -j"$(sysctl -n hw.logicalcpu)" >/dev/null 2>&1
    mkdir -p "$TOOLS_PREFIX/bin"
    cp "$BUILD_DIR/trustcache/trustcache" "$TRUSTCACHE_BIN"
    echo "  Installed: $TRUSTCACHE_BIN"
fi
# ── Libimobiledevice ──────────────────────────────────────────

View File

pip install -r "${REQUIREMENTS}"

echo ""
echo "=== Building keystone dylib ==="
# NOTE(review): hard-coded Apple Silicon Homebrew cellar path — Intel Macs use
# /usr/local/Cellar; confirm the target hosts before relying on this.
KEYSTONE_DIR="/opt/homebrew/Cellar/keystone"
# [[ ]] is the bash-native test: no word splitting or globbing of the path.
if [[ ! -d "${KEYSTONE_DIR}" ]]; then
    echo "Error: keystone not found. Install with: brew install keystone"
    exit 1
fi

View File

@@ -23,8 +23,13 @@ class VPhoneVirtualMachineView: VZVirtualMachineView {
// MARK: - Event Handling

/// The view is always willing to become first responder so it can receive
/// key events.
override var acceptsFirstResponder: Bool {
    true
}

/// Accept the initial mouse-down even when the window is not key, so the
/// activating click is delivered to the view rather than swallowed by
/// window activation (standard `acceptsFirstMouse(for:)` semantics).
override func acceptsFirstMouse(for _: NSEvent?) -> Bool {
    true
}
override func viewDidMoveToWindow() {
super.viewDidMoveToWindow()