Code snippet: monitoring memory-read operations on pages protected with the VM_PROT_EXECUTE_ONLY permission.

When a read targets an execute-only page, the access fault is caught via a Mach exception handler and the faulting load is emulated against a writable backup copy of the page.

#include "executeonly_monitor.h"

#include <mach/mach.h>
#include <mach/vm_param.h>

#include <dlfcn.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <cstring>

#include <unordered_map>
#include <vector>

#include "logging/logging.h"

// Round `address` down to the nearest multiple of `range` (`range` must be a
// power of two).
#define ALIGN_FLOOR(address, range) ((addr_t)address & ~((addr_t)range - 1))

// A64 instructions are fixed-width 32-bit words.
typedef int32_t arm64_insn_t;

#define LOG_TAG "executeonly monitor"

// Bit-field helpers over an instruction word:
//   submask(x)         - mask covering the low x+1 bits
//   bits(obj, st, fn)  - extract bits [st, fn] inclusive, zero-extended
//   bit(obj, st)       - extract the single bit at position st
//   sbits(obj, st, fn) - extract bits [st, fn], sign-extended from bit fn
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bits(obj, st, fn) (((obj) >> (st)) & submask((fn) - (st)))
#define bit(obj, st) (((obj) >> (st)) & 1)
#define sbits(obj, st, fn) ((long)(bits(obj, st, fn) | ((long)bit(obj, fn) * ~submask(fn - st))))

// Map: execute-only page base address -> writable backup copy of that page.
// Lazily allocated in executeonly_monitor().
std::unordered_map<addr_t, addr_t> *backup_pages = nullptr;

// Emulate the faulting A64 load instruction so that a read targeting an
// execute-only page is served from the page's writable backup copy (see
// `backup_pages`), then advance the pc past the emulated instruction.
//
// ts              - thread state of the faulting thread; registers mutated in place
// ts_pc           - pc at the fault (address of the faulting instruction)
// fault_addr      - data address that raised the access fault
// fault_page_addr - fault_addr rounded down to its page base
//
// NOTE(review): only a subset of the C4.1.4 "Loads and Stores" encodings is
// decoded; unrecognized encodings may still have the pc advanced at the
// bottom (rt_ndx is unconditionally re-derived from bits 0-4) — confirm.
void handle_fault_with_execute_only_supported(arm_thread_state64_t *ts, addr_t ts_pc, addr_t fault_addr,
                                              addr_t fault_page_addr) {
  arm64_insn_t insn;
  // Read the faulting instruction straight from the pc. Assumes the code page
  // is readable from this handler context — TODO confirm; the disabled path
  // below serviced the read from a backup page instead.
  insn = *(arm64_insn_t *)ts_pc;

#if 0
  // read fault insn
  addr_t ts_pc_page = ALIGN_FLOOR(ts_pc, PAGE_SIZE);
  auto   iter       = g_crc_page_map->find(ts_pc_page);
  if (iter == g_crc_page_map->end()) {
    insn = *(arm64_insn_t *)ts_pc;
  } else {
    insn = *(arm64_insn_t *)(iter->second.bakcup_page_addr + (ts_pc - ts_pc_page));
  }
#endif

  /* C4.1 A64 instruction set encoding */
  /* C4.1.4 Loads and Stores */
  int rn_ndx = -1; // base register (Rn) index, -1 when not decoded
  int rt_ndx = -1; // destination register (Rt) index, -1 when not decoded
  // NOTE(review): this tests whether EITHER of bits 27/25 is set, which is
  // broader than the Arm ARM "loads and stores" op0 pattern — confirm.
  if (insn & 0x0a000000) {
    uint32_t op0 = (insn & 0xf0000000) >> 28;
    uint32_t op2 = (insn & 0x01800000) >> 23;
    uint32_t op4 = (insn & 0x00000c00) >> 10;
    if (((op0 & 0b0011) == 0b01) && ((op2 & 0b10) == 0b00)) {
      // Literal (pc-relative) forms carry no general-purpose base register.
      rn_ndx = -1;
    } else {
      // NOTE(review): mask 0x1e0 keeps only 4 of Rn's 5 bits (0x3e0), so
      // x16..x31 decode incorrectly — kept as-is, confirm intent.
      rn_ndx = ((insn & 0x1e0) >> 5);
    }

    if (((op0 & 0b0011) == 0b11) && ((op2 & 0b10) == 0b00) && (op4 == 0b10)) {
      uint32_t size = (insn & 0xc0000000) >> 30;
      uint32_t V = (insn & 0x04000000) >> 26;
      uint32_t opc = (insn & 0x00c00000) >> 22;
      // ldrsw(register)
      if (size == 0b10 && V == 0 && opc == 0b10) {
        rt_ndx = insn & 0x1f;
        if (backup_pages->count(fault_page_addr) == 0) {
          return;
        }
        auto backup_page = (*backup_pages)[fault_page_addr];
        auto fault_backup_addr = backup_page + (fault_addr - fault_page_addr);
        // Sign-extend the 32-bit value into Rt, as LDRSW does.
        // (FIX: dropped the unused `rn` local, which read ts->__x[-1] when
        // rn_ndx was -1.)
        ts->__x[rt_ndx] = (int64_t) * (int32_t *)fault_backup_addr;
        LOG(1, "set rt register: %p", fault_backup_addr);
      }
    }
  }

  rt_ndx = bits(insn, 0, 4);
  int size_flag = bits(insn, 30, 31);
  int opc = bits(insn, 22, 23);
  int post_pre_flag = bits(insn, 10, 11);
  LOG(1, "fault: post_pre: %d, size:%d, opc: %d, rn: %d, rt: %d", post_pre_flag, size_flag, opc, rn_ndx, rt_ndx);

  if (rn_ndx >= 0) {
    if (backup_pages->count(fault_page_addr) == 0) {
      return;
    }
    auto backup_page = (*backup_pages)[fault_page_addr];
    auto rn = ts->__x[rn_ndx];
    auto new_rn = backup_page + (rn - fault_page_addr);
    // Dispatch on the access size in bits 30-31.
    // FIX: the original compared `(size_flag & mask) == mask`, which is a
    // tautology for 0b00 (the byte branch always ran) and matches multiple
    // sizes for 0b01/0b10; use exact equality so each size takes its branch.
    // NOTE(review): the base-register writeback below only matches post-index
    // addressing forms — confirm that is the only form expected here.
    if (size_flag == 0b00) {
      *(uint8_t *)&ts->__x[rt_ndx] = *(uint8_t *)new_rn;
      ts->__x[rn_ndx] += 1;
    } else if (size_flag == 0b01) {
      *(uint16_t *)&ts->__x[rt_ndx] = *(uint16_t *)new_rn;
      ts->__x[rn_ndx] += 2;
    } else if (size_flag == 0b10) {
      *(uint32_t *)&ts->__x[rt_ndx] = *(uint32_t *)new_rn;
      ts->__x[rn_ndx] += 4;
    } else {
      *(uint64_t *)&ts->__x[rt_ndx] = *(uint64_t *)new_rn;
      ts->__x[rn_ndx] += 8;
    }
  }

  // Skip the emulated instruction (A64 instructions are 4 bytes).
  // NOTE(review): rt_ndx = bits(insn, 0, 4) is always >= 0, so the pc is
  // advanced even for encodings that were not actually emulated — confirm.
  if (rt_ndx >= 0) {
    arm_thread_state64_set_pc_fptr(*ts, ts_pc + 4);
  }
}

// Make the page containing `addr` execute-only (no read/write) so that data
// reads of it fault into our exception handler.
void set_page_execute_only(void *addr) {
  size_t page_size = sysconf(_SC_PAGESIZE);
  addr_t page = ALIGN_FLOOR(addr, page_size);
#if 0
  kern_return_t kr;
  kr = vm_protect(mach_task_self(), (mach_vm_address_t) page, (mach_vm_size_t) page_size, false, VM_PROT_EXECUTE_ONLY);
  if (kr != KERN_SUCCESS) {
    ERROR_LOG("failed: %s", mach_error_string(kr));
  }
#else
  // NOTE(review): VM_PROT_EXECUTE_ONLY is a mach vm_prot_t value being passed
  // to mprotect(), which expects PROT_* flags — confirm they coincide here.
  int ret = mprotect((void *)page, page_size, VM_PROT_EXECUTE_ONLY);
  if (ret) {
    // FIX: the original format string had a %s with no argument (undefined
    // behavior); report the actual errno reason.
    LOG(1, "mprotect failed: %s", strerror(errno));
  }
#endif
}

// Receive right on which Mach exception messages arrive; allocated in
// install_memory_read_exception_callback().
static mach_port_t exception_port = MACH_PORT_NULL;

// Return true when the same fault address dominates the recent fault history
// (>= 13 of the last 16 faults), i.e. the thread appears stuck re-faulting on
// one address.
// NOTE(review): `ts` and `es` are currently unused (a frame-pointer backtrace
// check was evidently planned but never written — the dead locals are removed
// here). The static ring buffer is not thread-safe; confirm this is only ever
// called from the single exception-handler thread.
static bool check_if_fall_loop(arm_thread_state64_t *ts, arm_exception_state64_t *es, addr_t fault_addr) {
  static int fault_stack_count = 0;
  static addr_t fault_stack[16] = {0};

  // Ring-buffer insert. FIX: keep the counter bounded instead of incrementing
  // forever, which would eventually overflow a signed int (UB).
  fault_stack[fault_stack_count] = fault_addr;
  fault_stack_count = (fault_stack_count + 1) % 16;

  // Count how many of the last 16 faults hit this same address.
  int count = 0;
  for (int i = 0; i < 16; i++) {
    if (fault_stack[i] == fault_addr)
      count += 1;
  }
  return count >= 13;
}

// Heuristic sanity check: treat a fault address outside the typical arm64
// Darwin userland range [0x100000000, 0x800000000] as an invalid access that
// we should not attempt to emulate.
static bool check_if_invalid_access(addr_t fault_addr) {
  return fault_addr < 0x100000000 || fault_addr > 0x800000000;
}

static void *exception_handler(void *ctx) {
  Request In0P;
  mach_msg_header_t *InHeadP = &In0P.Head;
  for (;;) {
    kern_return_t kr;
    kr = mach_msg(&In0P.Head, MACH_RCV_MSG | MACH_MSG_TIMEOUT_NONE, 0, sizeof(Request), exception_port,
                  MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
      LOG(1, "failed: %s", mach_error_string(kr));
      return NULL;
    }

    mach_port_t thread_port = In0P.thread.name;
    mach_port_t task_port = In0P.task.name;

    arm_thread_state64_t ts = {0};
    mach_msg_type_number_t ts_cnt = ARM_THREAD_STATE64_COUNT;
    kr = thread_get_state(thread_port, ARM_THREAD_STATE64, (thread_state_t)&ts, &ts_cnt);
    if (kr != KERN_SUCCESS) {
      LOG(1, "failed: %s", mach_error_string(kr));
      return NULL;
    }

    arm_exception_state64_t es = {0};
    mach_msg_type_number_t es_cnt = ARM_EXCEPTION_STATE64_COUNT;
    kr = thread_get_state(thread_port, ARM_EXCEPTION_STATE64, (thread_state_t)&es, &es_cnt);
    if (kr != KERN_SUCCESS) {
      LOG(1, "failed: %s", mach_error_string(kr));
      return NULL;
    }

    addr_t ts_pc = __darwin_arm_thread_state64_get_pc(ts);
    addr_t fault_addr = es.__far;
    addr_t fault_page_addr = ALIGN_FLOOR(fault_addr, PAGE_SIZE);
    LOG(1, "fault: at %p, pc %p", fault_addr, ts_pc);

    if (!check_if_invalid_access(fault_addr)) {
      handle_fault_with_execute_only_supported(&ts, ts_pc, fault_addr, fault_page_addr);
    }

    kr = thread_set_state(thread_port, ARM_THREAD_STATE64, (thread_state_t)&ts, ARM_THREAD_STATE64_COUNT);
    if (kr != KERN_SUCCESS) {
      LOG(1, "failed: %s", mach_error_string(kr));
      return NULL;
    }

    // reply
    typedef __Reply__mach_exception_raise_t Reply __attribute__((unused));
    Reply OutP;

    if (check_if_invalid_access(fault_addr)) {
      OutP.RetCode = KERN_FAILURE;
    }

    // reply
    kr = mach_msg(&OutP.Head, MACH_SEND_MSG | MACH_MSG_TIMEOUT_NONE, sizeof(Reply), 0, MACH_PORT_NULL,
                  MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
      LOG(1, " failed: %s", mach_error_string(kr));
      return NULL;
    }
  }
  return NULL;
}

// One-time setup: allocate an exception port, register it for this task, and
// start the handler thread that services execute-only read faults.
void install_memory_read_exception_callback() {
  static bool initialized = false;
  if (initialized)
    return;
  initialized = true;

  kern_return_t kr = KERN_SUCCESS;
  kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exception_port);
  if (kr != KERN_SUCCESS) {
    LOG(1, "failed: %s", mach_error_string(kr));
    return;
  }

  // The kernel needs a send right to deliver exception messages to us.
  kr = mach_port_insert_right(mach_task_self(), exception_port, exception_port, MACH_MSG_TYPE_MAKE_SEND);
  if (kr != KERN_SUCCESS) {
    LOG(1, "failed: %s", mach_error_string(kr));
    return;
  }

  // set exception handler
  // NOTE(review): EXCEPTION_DEFAULT replies do not carry thread state, so the
  // ARM_THREAD_STATE64 flavor argument here is effectively unused — confirm.
  kr = task_set_exception_ports(mach_task_self(), EXC_MASK_ALL, exception_port,
                                EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, ARM_THREAD_STATE64);
  if (kr != KERN_SUCCESS) {
    LOG(1, "failed: %s", mach_error_string(kr));
    return;
  }

  // setup a new thread where to handle the exceptions
  // FIX: check pthread_create's result (the original ignored it) and detach
  // the thread — it is never joined, so detaching avoids a resource leak.
  pthread_t exception_handler_thread;
  int err = pthread_create(&exception_handler_thread, NULL, exception_handler, NULL);
  if (err != 0) {
    LOG(1, "pthread_create failed: %s", strerror(err));
    return;
  }
  pthread_detach(exception_handler_thread);

  LOG(1, "install memory read exception(port is %p) callback done", exception_port);
}

// Public entry point: lazily installs the Mach exception machinery used to
// service faults on execute-only pages. Safe to call more than once — the
// underlying installer is guarded by a one-shot flag.
void executeonly_monitor_init() {
  install_memory_read_exception_callback();
}

// Allocate one anonymous read/write page (tagged 255 for vmmap attribution).
// Returns the page address, or 0 on failure.
addr_t allocate_page() {
  auto page = (addr_t)mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, VM_MAKE_TAG(255), 0);
  if ((void *)page == MAP_FAILED) {
    // FIX: include the errno reason, consistent with the mach_error_string
    // logging used elsewhere in this file.
    LOG(1, "mmap failed: %s", strerror(errno));
    return 0;
  }
  return page;
}

// Create a writable backup copy of the page containing `addr`: duplicate the
// page, overlay `buffer` at `addr`'s offset within it, and register the pair
// in `backup_pages` so the fault handler can serve reads from the copy.
void allocate_backup_page(void *addr, char *buffer, int buffer_size) {
  addr_t page_addr = ALIGN_FLOOR(addr, PAGE_SIZE);
  auto backup_page_addr = allocate_page();
  // FIX: allocate_page() returns 0 on failure; the original memcpy'd into it
  // unconditionally, which would crash on a null destination.
  if (backup_page_addr == 0) {
    LOG(1, "allocate backup page failed for %p", page_addr);
    return;
  }
  memcpy((void *)backup_page_addr, (void *)page_addr, PAGE_SIZE);

  // Overlay the caller-supplied bytes at the same offset within the copy.
  uint32_t page_offset = (addr_t)addr - page_addr;
  memcpy((void *)(backup_page_addr + page_offset), buffer, buffer_size);

  backup_pages->insert(std::make_pair(page_addr, backup_page_addr));
  LOG(1, "allocate backup page: %p --> %p, %p, %p", page_addr, backup_page_addr, *(uint64_t *)page_addr,
      *(uint64_t *)backup_page_addr);
}

int executeonly_monitor(void *addr, char *buffer, int buffer_size) {
  if (backup_pages == nullptr) {
    backup_pages = new std::unordered_map<addr_t, addr_t>();
  }

  addr_t page_addr = ALIGN_FLOOR(addr, PAGE_SIZE);
  LOG(1, "start monitor %p page", page_addr);

  allocate_backup_page(addr, buffer, buffer_size);
  set_page_execute_only(addr);
  return 0;
}
5 likes

I haven't fully understood it, but I'm very impressed. I recall Frida has a similar mechanism.

This can also be implemented with hardware breakpoints/watchpoints:

// Alternative approach (forum suggestion): set a hardware breakpoint and
// watchpoint on `addr` via the ARM64 debug state instead of using
// execute-only page faults.
// NOTE(review): `struct` before arm_debug_state64_t is likely wrong — on
// Darwin this is a typedef (_STRUCT_ARM_DEBUG_STATE64) — confirm it compiles.
struct arm_debug_state64_t dbg;
mach_msg_type_number_t count = ARM_DEBUG_STATE64_COUNT;

// Breakpoint 0: value register = address, control register enables it.
dbg.__bvr[0] = addr;
dbg.__bcr[0] = 0x1 | (0x1ULL << 5); // watch 1 byte, 0xf for 8 byte
// Watchpoint 0: value register = address, control register enables it.
dbg.__wvr[0] = addr;
dbg.__wcr[0] = 0x1 | 0x18 | (0x1ULL << 5); // watch 1 byte, 0xff for 16 byte

thread_set_state(thread, ARM_DEBUG_STATE64, (thread_state_t)&dbg, count);

Newly created threads inherit the task-level debug-state settings, so watchpoints and breakpoints carry over to them.


1 like

exception_code[0] == EXC_ARM_DA_DEBUG indicates a watchpoint event, while EXC_ARM_BREAKPOINT indicates a breakpoint event.
Then, inside the exception handler, setting dbg.__mdscr_el1 = 0x1 enables single-stepping (enabling SINGLE_STEP causes watchpoints and breakpoints to be ignored), and the next exception code received will be 0.

1 like