Bug 1607014 Part 1 - Redirect functions by changing linker references instead of rewriting machine code, r=jlast.

Differential Revision: https://phabricator.services.mozilla.com/D58693

--HG--
extra : moz-landing-system : lando
bhackett1024 committed Jan 5, 2020
1 parent 6d777c8 commit d799779
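
The commit message describes redirecting functions by changing the references the dynamic linker resolves, rather than by rewriting the machine code at the start of each redirected function. As a rough, hypothetical sketch of that general idea (the names below are invented for illustration and do not appear in the patch), a redirection amounts to swapping the pointer slot callers go through while remembering the original target:

// Hypothetical sketch only: Redirection, InstallRedirection, and
// OriginalFunction are illustrative names, not identifiers from this patch.
struct Redirection {
  void** mSlot;        // indirect slot the caller loads before calling
  void* mOriginal;     // target the linker originally resolved
  void* mNewFunction;  // hook supplied by the record/replay layer
};

// Redirect by swapping the pointer callers go through, leaving the
// original function's machine code untouched.
static void InstallRedirection(Redirection& aRedirection, void* aNewFunction) {
  aRedirection.mOriginal = *aRedirection.mSlot;  // remember the old target
  aRedirection.mNewFunction = aNewFunction;
  *aRedirection.mSlot = aNewFunction;            // callers now reach the hook
}

// The unmodified original code remains callable through the saved pointer.
static void* OriginalFunction(const Redirection& aRedirection) {
  return aRedirection.mOriginal;
}

Because the original instructions are never overwritten, the assembler no longer needs to disassemble and relocate the clobbered bytes, which is what most of the removed code below was for.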
Showing 15 changed files with 420 additions and 11,338 deletions.
290 changes: 34 additions & 256 deletions toolkit/recordreplay/Assembler.cpp
@@ -7,105 +7,50 @@
#include "Assembler.h"

#include "ProcessRecordReplay.h"
#include "udis86/types.h"

#include <sys/mman.h>

namespace mozilla {
namespace recordreplay {

Assembler::Assembler()
: mCursor(nullptr), mCursorEnd(nullptr), mCanAllocateStorage(true) {}

Assembler::Assembler(uint8_t* aStorage, size_t aSize)
: mCursor(aStorage),
mCursorEnd(aStorage + aSize),
mCanAllocateStorage(false) {}

Assembler::~Assembler() {
// Patch each jump to the point where the jump's target was copied, if there
// is one.
for (auto pair : mJumps) {
uint8_t* source = pair.first;
uint8_t* target = pair.second;

for (auto copyPair : mCopiedInstructions) {
if (copyPair.first == target) {
PatchJump(source, copyPair.second);
break;
}
}
}
}
mCursorEnd(aStorage + aSize) {}

void Assembler::NoteOriginalInstruction(uint8_t* aIp) {
mCopiedInstructions.emplaceBack(aIp, Current());
}
// The maximum byte length of an x86/x64 instruction.
static const size_t MaximumInstructionLength = 15;

void Assembler::Advance(size_t aSize) {
MOZ_RELEASE_ASSERT(aSize <= MaximumAdvance);
mCursor += aSize;
MOZ_RELEASE_ASSERT(mCursor + MaximumInstructionLength <= mCursorEnd);
}

uint8_t* Assembler::Current() {
// Reallocate the buffer if there is not enough space. We need enough for the
// maximum space used by any of the assembling functions, as well as for a
// following jump for fallthrough to the next allocated space.
if (size_t(mCursorEnd - mCursor) <= MaximumAdvance + JumpBytes) {
MOZ_RELEASE_ASSERT(mCanAllocateStorage);

// Allocate some writable, executable memory.
static const size_t BufferSize = PageSize * 32;
uint8_t* buffer = new uint8_t[BufferSize];
UnprotectExecutableMemory(buffer, BufferSize);

if (mCursor) {
// Patch a jump for fallthrough from the last allocation.
MOZ_RELEASE_ASSERT(size_t(mCursorEnd - mCursor) >= JumpBytes);
PatchJump(mCursor, buffer);
}

mCursor = buffer;
mCursorEnd = &buffer[BufferSize];
}

return mCursor;
}

static void Push16(uint8_t** aIp, uint16_t aValue) {
(*aIp)[0] = 0x66;
(*aIp)[1] = 0x68;
*reinterpret_cast<uint16_t*>(*aIp + 2) = aValue;
(*aIp) += 4;
void Assembler::Jump(void* aTarget) {
PushImmediate(aTarget);
Return();
}

static void PushImmediateAtIp(uint8_t** aIp, void* aValue) {
void Assembler::PushImmediate(void* aValue) {
// Push the target literal onto the stack, 2 bytes at a time. This is
// apparently the best way of getting an arbitrary 8 byte literal onto the
// stack, as 4 byte literals we push will be sign extended to 8 bytes.
size_t nvalue = reinterpret_cast<size_t>(aValue);
Push16(aIp, nvalue >> 48);
Push16(aIp, nvalue >> 32);
Push16(aIp, nvalue >> 16);
Push16(aIp, nvalue);
}

/* static */
void Assembler::PatchJump(uint8_t* aIp, void* aTarget) {
PushImmediateAtIp(&aIp, aTarget);
*aIp = 0xC3; // ret
Push16(nvalue >> 48);
Push16(nvalue >> 32);
Push16(nvalue >> 16);
Push16(nvalue);
}

void Assembler::Jump(void* aTarget) {
PatchJump(Current(), aTarget);
mJumps.emplaceBack(Current(), (uint8_t*)aTarget);
Advance(JumpBytes);
}

void Assembler::PushImmediate(void* aValue) {
void Assembler::Push16(uint16_t aValue) {
uint8_t* ip = Current();
PushImmediateAtIp(&ip, aValue);
Advance(PushImmediateBytes);
ip[0] = 0x66;
ip[1] = 0x68;
*reinterpret_cast<uint16_t*>(ip + 2) = aValue;
Advance(4);
}

void Assembler::Return() {
@@ -116,207 +61,40 @@ void Assembler::Breakpoint() {
NewInstruction(0xCC);
}

static uint8_t OppositeJump(uint8_t aOpcode) {
// Get the opposite single byte jump opcode for a one or two byte conditional
// jump. Opposite opcodes are adjacent, e.g. 0x7C -> jl and 0x7D -> jge.
if (aOpcode >= 0x80 && aOpcode <= 0x8F) {
aOpcode -= 0x10;
} else {
MOZ_RELEASE_ASSERT(aOpcode >= 0x70 && aOpcode <= 0x7F);
}
return (aOpcode & 1) ? aOpcode - 1 : aOpcode + 1;
}

void Assembler::ConditionalJump(uint8_t aCode, void* aTarget) {
uint8_t* ip = Current();
ip[0] = OppositeJump(aCode);
ip[1] = (uint8_t)JumpBytes;
Advance(2);
Jump(aTarget);
}

void Assembler::CopyInstruction(uint8_t* aIp, size_t aSize) {
MOZ_RELEASE_ASSERT(aSize <= MaximumInstructionLength);
memcpy(Current(), aIp, aSize);
Advance(aSize);
}

void Assembler::PushRax() { NewInstruction(0x50); }

void Assembler::PopRax() { NewInstruction(0x58); }

void Assembler::JumpToRax() { NewInstruction(0xFF, 0xE0); }

void Assembler::CallRax() { NewInstruction(0xFF, 0xD0); }

void Assembler::LoadRax(size_t aWidth) {
switch (aWidth) {
case 1:
NewInstruction(0x8A, 0x00);
break;
case 2:
NewInstruction(0x66, 0x8B, 0x00);
break;
case 4:
NewInstruction(0x8B, 0x00);
break;
case 8:
NewInstruction(0x48, 0x8B, 0x00);
break;
default:
MOZ_CRASH();
}
}

void Assembler::CompareRaxWithTopOfStack() {
NewInstruction(0x48, 0x39, 0x04, 0x24);
}

void Assembler::CompareTopOfStackWithRax() {
NewInstruction(0x48, 0x3B, 0x04, 0x24);
}

void Assembler::PushRbx() { NewInstruction(0x53); }

void Assembler::PopRbx() { NewInstruction(0x5B); }

void Assembler::PopRegister(/*ud_type*/ int aRegister) {
MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));

if (aRegister <= UD_R_RDI) {
NewInstruction(0x58 + aRegister - UD_R_RAX);
void Assembler::PopRegister(Register aRegister) {
if (aRegister <= Register::RDI) {
NewInstruction(0x58 + (int)aRegister - (int)Register::RAX);
} else {
NewInstruction(0x41, 0x58 + aRegister - UD_R_R8);
}
}

void Assembler::StoreRbxToRax(size_t aWidth) {
switch (aWidth) {
case 1:
NewInstruction(0x88, 0x18);
break;
case 2:
NewInstruction(0x66, 0x89, 0x18);
break;
case 4:
NewInstruction(0x89, 0x18);
break;
case 8:
NewInstruction(0x48, 0x89, 0x18);
break;
default:
MOZ_CRASH();
}
}

void Assembler::CompareValueWithRax(uint8_t aValue, size_t aWidth) {
switch (aWidth) {
case 1:
NewInstruction(0x3C, aValue);
break;
case 2:
NewInstruction(0x66, 0x83, 0xF8, aValue);
break;
case 4:
NewInstruction(0x83, 0xF8, aValue);
break;
case 8:
NewInstruction(0x48, 0x83, 0xF8, aValue);
break;
default:
MOZ_CRASH();
NewInstruction(0x41, 0x58 + (int)aRegister - (int)Register::R8);
}
}

static const size_t MoveImmediateBytes = 10;

/* static */
void Assembler::PatchMoveImmediateToRax(uint8_t* aIp, void* aValue) {
aIp[0] = 0x40 | (1 << 3);
aIp[1] = 0xB8;
*reinterpret_cast<void**>(aIp + 2) = aValue;
}

void Assembler::MoveImmediateToRax(void* aValue) {
PatchMoveImmediateToRax(Current(), aValue);
Advance(MoveImmediateBytes);
}

void Assembler::MoveRaxToRegister(/*ud_type*/ int aRegister) {
MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));

if (aRegister <= UD_R_RDI) {
NewInstruction(0x48, 0x89, 0xC0 + aRegister - UD_R_RAX);
} else {
NewInstruction(0x49, 0x89, 0xC0 + aRegister - UD_R_R8);
}
uint8_t* ip = Current();
ip[0] = 0x40 | (1 << 3);
ip[1] = 0xB8;
*reinterpret_cast<void**>(ip + 2) = aValue;
Advance(10);
}

void Assembler::MoveRegisterToRax(/*ud_type*/ int aRegister) {
MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));

if (aRegister <= UD_R_RDI) {
NewInstruction(0x48, 0x89, 0xC0 + (aRegister - UD_R_RAX) * 8);
void Assembler::MoveRaxToRegister(Register aRegister) {
if (aRegister <= Register::RDI) {
NewInstruction(0x48, 0x89, 0xC0 + (int)aRegister - (int)Register::RAX);
} else {
NewInstruction(0x4C, 0x89, 0xC0 + (aRegister - UD_R_R8) * 8);
NewInstruction(0x49, 0x89, 0xC0 + (int)aRegister - (int)Register::R8);
}
}

void Assembler::ExchangeByteRegisterWithAddressAtRbx(
/*ud_type*/ int aRegister) {
MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));

if (aRegister <= UD_R_RDI) {
NewInstruction(0x86, 0x03 + (aRegister - UD_R_RAX) * 8);
void Assembler::MoveRegisterToRax(Register aRegister) {
if (aRegister <= Register::RDI) {
NewInstruction(0x48, 0x89, 0xC0 + ((int)aRegister - (int)Register::RAX) * 8);
} else {
NewInstruction(0x44, 0x86, 0x03 + (aRegister - UD_R_R8) * 8);
}
}

void Assembler::ExchangeByteRbxWithAddressAtRax() {
NewInstruction(0x86, 0x18);
}

/* static */ /*ud_type*/
int Assembler::NormalizeRegister(
/*ud_type*/ int aRegister) {
if (aRegister >= UD_R_AL && aRegister <= UD_R_R15B) {
return aRegister - UD_R_AL + UD_R_RAX;
}
if (aRegister >= UD_R_AX && aRegister <= UD_R_R15W) {
return aRegister - UD_R_AX + UD_R_RAX;
}
if (aRegister >= UD_R_EAX && aRegister <= UD_R_R15D) {
return aRegister - UD_R_EAX + UD_R_RAX;
NewInstruction(0x4C, 0x89, 0xC0 + ((int)aRegister - (int)Register::R8) * 8);
}
if (aRegister >= UD_R_RAX && aRegister <= UD_R_R15) {
return aRegister;
}
return UD_NONE;
}

/* static */
bool Assembler::CanPatchShortJump(uint8_t* aIp, void* aTarget) {
return (aIp + 2 - 128 <= aTarget) && (aIp + 2 + 127 >= aTarget);
}

/* static */
void Assembler::PatchShortJump(uint8_t* aIp, void* aTarget) {
MOZ_RELEASE_ASSERT(CanPatchShortJump(aIp, aTarget));
aIp[0] = 0xEB;
aIp[1] = uint8_t(static_cast<uint8_t*>(aTarget) - aIp - 2);
}

/* static */
void Assembler::PatchJumpClobberRax(uint8_t* aIp, void* aTarget) {
PatchMoveImmediateToRax(aIp, aTarget);
aIp[10] = 0x50; // push %rax
aIp[11] = 0xC3; // ret
}

/* static */
void Assembler::PatchClobber(uint8_t* aIp) {
aIp[0] = 0xCC; // int3
}

static uint8_t* PageStart(uint8_t* aPtr) {
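
For reference, here is a self-contained restatement (not part of the patch; the helpers and buffer handling below are invented) of the jump encoding the new Jump/PushImmediate/Return path assembles: four 2-byte immediate pushes place the 8-byte target on the stack, and a final ret transfers control to it.

// Illustrative only: EmitPush16 and EmitJump are invented helpers restating
// the encoding used by the new Push16/PushImmediate/Return path.
#include <cstdint>
#include <cstring>

static uint8_t* EmitPush16(uint8_t* aIp, uint16_t aValue) {
  aIp[0] = 0x66;  // operand-size prefix
  aIp[1] = 0x68;  // push imm16
  std::memcpy(aIp + 2, &aValue, sizeof(aValue));
  return aIp + 4;
}

// Four 2-byte pushes build the 8-byte target on the stack (high word first,
// so the low word ends up at the top), then ret (0xC3) pops it into RIP.
static size_t EmitJump(uint8_t* aIp, void* aTarget) {
  uint8_t* ip = aIp;
  uint64_t target = reinterpret_cast<uint64_t>(aTarget);
  ip = EmitPush16(ip, uint16_t(target >> 48));
  ip = EmitPush16(ip, uint16_t(target >> 32));
  ip = EmitPush16(ip, uint16_t(target >> 16));
  ip = EmitPush16(ip, uint16_t(target));
  *ip++ = 0xC3;  // ret
  return size_t(ip - aIp);  // 4 pushes of 4 bytes plus 1-byte ret = 17 bytes
}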
