Lines 341-346

      * Patch 64-bit hosts.
      */
     uint32_t cRipRelMovs = 0;
+    uint32_t cRelCalls   = 0;
 
     /* Just use the disassembler to skip 12 bytes or more, we might need to
        rewrite mov instructions using RIP relative addressing. */
Lines 349-355

         cbInstr = 1;
         int rc = DISInstr(pbTarget + offJmpBack, DISCPUMODE_64BIT, &Dis, &cbInstr);
         if (   RT_FAILURE(rc)
-            || (Dis.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
+            || (   Dis.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW
+                && Dis.pCurInstr->uOpcode != OP_CALL)
             || (   Dis.ModRM.Bits.Mod == 0
                 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */
                 && Dis.pCurInstr->uOpcode != OP_MOV))
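
Read as a predicate, the relaxed check in this sizing pass now tolerates control-flow instructions as long as they are CALLs, and RIP-relative operands as long as they belong to a MOV; anything else still fails with VERR_SUPLIB_UNEXPECTED_INSTRUCTION. A minimal restatement for reference only (the helper name and the PCDISSTATE parameter are illustrative, not part of the patch):

/* Sketch: true when the sizing pass may step over this instruction. */
static bool supR3CanPatchOverInstr(PCDISSTATE pDis)
{
    /* Control flow is only tolerated for CALL, which is rewritten later. */
    if (   (pDis->pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW)
        && pDis->pCurInstr->uOpcode != OP_CALL)
        return false;

    /* RIP-relative addressing is only rewritten for MOV. */
    if (   pDis->ModRM.Bits.Mod == 0
        && pDis->ModRM.Bits.Rm == 5 /* wrt RIP */
        && pDis->pCurInstr->uOpcode != OP_MOV)
        return false;

    return true;
}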
Lines 357-371

 
         if (Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */)
             cRipRelMovs++;
+        if (   Dis.pCurInstr->uOpcode == OP_CALL
+            && (Dis.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
+            cRelCalls++;
 
         offJmpBack += cbInstr;
         cbPatchMem += cbInstr;
     }
 
+    /*
+     * Each relative call requires extra bytes as it is converted to a pushq imm32
+     * + mov [RSP+4], imm32 + a jmp qword [$+8 wrt RIP] to avoid clobbering registers.
+     */
+    cbPatchMem += cRelCalls * RT_ALIGN_32(13 + 6 + 8, 8);
     cbPatchMem += 14; /* jmp qword [$+8 wrt RIP] + 8 byte address to jump to. */
     cbPatchMem = RT_ALIGN_32(cbPatchMem, 8);
 
-    /* Allocate suitable exectuable memory available. */
+    /* Allocate suitable executable memory available. */
     bool fConvRipRelMovs = false;
     uint8_t *pbPatchMem = supR3HardenedMainPosixExecMemAlloc(cbPatchMem, pbTarget, cRipRelMovs > 0);
     if (!pbPatchMem)
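
For reference, the per-call reservation added above breaks down as follows: pushq imm32 is 5 bytes and mov dword [RSP+4], imm32 is 8 bytes (the 13 bytes reserved for the return-address push in the rewrite loop below), jmp qword [$+8 wrt RIP] is 6 bytes, and the 64-bit target address occupies another 8 bytes; 13 + 6 + 8 = 27, which RT_ALIGN_32 rounds up to 32 so each converted call stays 8-byte aligned. The same arithmetic as a sketch (the macro name is made up):

/* Worst-case bytes per converted relative call:
 *   push imm32                   68 xx xx xx xx             5 bytes (low half of return address)
 *   mov  dword [rsp+4], imm32    c7 44 24 04 xx xx xx xx    8 bytes (high half of return address)
 *   jmp  qword [$+disp wrt RIP]  ff 25 xx xx xx xx          6 bytes
 *   absolute call target                                    8 bytes
 * 5 + 8 + 6 + 8 = 27, aligned up to 32. */
#define SUP_CALL_THUNK_CB  RT_ALIGN_32(13 + 6 + 8, 8) /* = 32 */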
Lines 396-402

         cbInstr = 1;
         int rc = DISInstr(pbTarget + offInsn, DISCPUMODE_64BIT, &Dis, &cbInstr);
         if (   RT_FAILURE(rc)
-            || (Dis.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW))
+            || (   Dis.pCurInstr->fOpType & DISOPTYPE_CONTROLFLOW
+                && Dis.pCurInstr->uOpcode != OP_CALL))
             return VERR_SUPLIB_UNEXPECTED_INSTRUCTION;
 
         if (   Dis.ModRM.Bits.Mod == 0
Lines 439-444

                 pbPatchMem += sizeof(int32_t);
             }
         }
+        else if (   Dis.pCurInstr->uOpcode == OP_CALL
+                 && (Dis.pCurInstr->fOpType & DISOPTYPE_RELATIVE_CONTROLFLOW))
+        {
+            /* Convert to absolute jump. */
+            uintptr_t uAddr = (uintptr_t)&pbTarget[offInsn + cbInstr] + (intptr_t)Dis.Param1.uValue;
+
+            /* Skip the push instructions till the return address is known. */
+            uint8_t *pbPatchMemPush = pbPatchMem;
+            pbPatchMem += 13;
+
+            *pbPatchMem++ = 0xff; /* jmp qword [$+8 wrt RIP] */
+            *pbPatchMem++ = 0x25;
+            *(uint32_t *)pbPatchMem = (uint32_t)(RT_ALIGN_PT(pbPatchMem + 4, 8, uint8_t *) - (pbPatchMem + 4));
+            pbPatchMem = RT_ALIGN_PT(pbPatchMem + 4, 8, uint8_t *);
+            *(uint64_t *)pbPatchMem = uAddr;
+            pbPatchMem += sizeof(uint64_t);
+
+            /* Push the return address onto stack. Difficult on amd64 without clobbering registers... */
+            uintptr_t uAddrReturn = (uintptr_t)pbPatchMem;
+            *pbPatchMemPush++ = 0x68; /* push imm32 sign-extended as 64-bit */
+            *(uint32_t *)pbPatchMemPush = RT_LO_U32(uAddrReturn);
+            pbPatchMemPush += sizeof(uint32_t);
+            *pbPatchMemPush++ = 0xc7;
+            *pbPatchMemPush++ = 0x44;
+            *pbPatchMemPush++ = 0x24;
+            *pbPatchMemPush++ = 0x04; /* movl [RSP+4], imm32 */
+            *(uint32_t *)pbPatchMemPush = RT_HI_U32(uAddrReturn);
+        }
         else
         {
             memcpy(pbPatchMem, pbTarget + offInsn, cbInstr);
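
For orientation, the thunk emitted by the new OP_CALL branch ends up looking roughly like this (illustrative layout, not part of the patch; uAddrReturn is the first byte after the thunk, uAddr the original call target):

/*   68 <lo32 of uAddrReturn>          push imm32                 ; return address, sign-extended
 *   c7 44 24 04 <hi32 of uAddrReturn> mov  dword [rsp+4], imm32  ; fix up the high half
 *   ff 25 <disp32>                    jmp  qword [$+disp32 wrt RIP]
 *   <padding to the next 8-byte boundary, if needed>
 *   <uAddr, 8 bytes>                  absolute address of the original call target
 */

The push/mov pair is emitted last through pbPatchMemPush because the return address is only known once the jmp, the alignment padding and the 8-byte target have been laid down; doing it this way keeps the detour free of register clobbers.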