// Copyright (c) Meta Platforms, Inc. and affiliates.
// SPDX-License-Identifier: GPL-3.0-or-later
#ifndef DRGN_PLATFORM_H
#define DRGN_PLATFORM_H
#include <gelf.h>
#include <inttypes.h>
#include "cfi.h"
#include "drgn.h"
#include "util.h"
struct drgn_orc_entry;
struct drgn_register_state;
struct drgn_register {
	const char * const *names;
	size_t num_names;
	drgn_register_number regno;
};
struct drgn_register_layout {
	uint32_t offset;
	uint32_t size;
};
// This is an ugly layering violation needed for DW_CFA_AARCH64_negate_ra_state.
// We enforce that it stays up to date with a static_assert() in arch_aarch64.c.
#define DRGN_AARCH64_RA_SIGN_STATE_REGNO 0
/* ELF section to apply relocations to. */
struct drgn_relocating_section {
	char *buf;
	size_t buf_size;
	uint64_t addr;
	bool bswap;
};
extern struct drgn_error drgn_invalid_relocation_offset;
/*
 * Apply an ELF relocation as:
 *
 * - `*dst = addend + *r_addend` if `r_addend` is not `NULL` (for `ElfN_Rela`)
 * - `*dst += addend` if `r_addend` is `NULL` (for `ElfN_Rel`)
 *
 * Where `dst = (uintN_t *)(relocating->buf + r_offset)`.
 *
 * This checks bounds and handles unaligned destinations and byte swapping. It
 * does not check for overflow.
 */
struct drgn_error *
drgn_reloc_add64(const struct drgn_relocating_section *relocating,
		 uint64_t r_offset, const int64_t *r_addend, uint64_t addend);
struct drgn_error *
drgn_reloc_add32(const struct drgn_relocating_section *relocating,
		 uint64_t r_offset, const int64_t *r_addend, uint32_t addend);
struct drgn_error *
drgn_reloc_add16(const struct drgn_relocating_section *relocating,
		 uint64_t r_offset, const int64_t *r_addend, uint16_t addend);
struct drgn_error *
drgn_reloc_add8(const struct drgn_relocating_section *relocating,
		uint64_t r_offset, const int64_t *r_addend, uint8_t addend);
#define DRGN_UNKNOWN_RELOCATION_TYPE(r_type)				\
	drgn_error_format(DRGN_ERROR_OTHER,				\
			  "unknown relocation type %" PRIu32 " in %s; "	\
			  "please report this to %s",			\
			  (r_type), __func__, PACKAGE_BUGREPORT)
/*
 * Apply an ELF relocation. If @p r_addend is `NULL`, then this is an `ElfN_Rel`
 * relocation. Otherwise, this is an `ElfN_Rela` relocation.
 */
typedef struct drgn_error *
apply_elf_reloc_fn(const struct drgn_relocating_section *relocating,
		   uint64_t r_offset, uint32_t r_type, const int64_t *r_addend,
		   uint64_t sym_value);
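/*
 * Example (illustrative sketch, not part of this header): an architecture's
 * apply_elf_reloc_fn would typically switch on r_type and forward to the
 * matching drgn_reloc_addN() helper. The relocation constants R_EXAMPLE_64 and
 * R_EXAMPLE_32 below are hypothetical placeholders, not real ELF definitions:
 *
 *	static struct drgn_error *
 *	apply_elf_reloc_example(const struct drgn_relocating_section *relocating,
 *				uint64_t r_offset, uint32_t r_type,
 *				const int64_t *r_addend, uint64_t sym_value)
 *	{
 *		switch (r_type) {
 *		case R_EXAMPLE_64:
 *			// 64-bit absolute: *dst (+)= sym_value [+ *r_addend].
 *			return drgn_reloc_add64(relocating, r_offset, r_addend,
 *						sym_value);
 *		case R_EXAMPLE_32:
 *			// 32-bit absolute.
 *			return drgn_reloc_add32(relocating, r_offset, r_addend,
 *						sym_value);
 *		default:
 *			return DRGN_UNKNOWN_RELOCATION_TYPE(r_type);
 *		}
 *	}
 */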
/* Page table iterator. */
struct pgtable_iterator {
	/* Address of the top-level page table to iterate. */
	uint64_t pgtable;
	/* Current virtual address to translate. */
	uint64_t virt_addr;
};
/*
 * Translate the current virtual address from a page table iterator.
 *
 * Abstractly, a virtual address lies in a range of addresses in the address
 * space. A range may be a mapped page, a page table gap, or a range of invalid
 * addresses (e.g., non-canonical addresses on x86-64). This finds the range
 * containing the current virtual address, returns the first virtual address of
 * that range and the physical address it maps to (if any), and updates the
 * current virtual address to the end of the range.
 *
 * This does not merge contiguous ranges. For example, if two adjacent mapped
 * pages have adjacent physical addresses, this returns each page separately.
 * This makes it possible to distinguish between contiguous pages and "huge
 * pages" on architectures that support different page sizes. Similarly, if two
 * adjacent entries at level 2 of the page table are empty, this returns each
 * gap separately.
 *
 * @param[in] it Iterator.
 * @param[out] virt_addr_ret Returned first virtual address in the range
 * containing the current virtual address.
 * @param[out] phys_addr_ret Returned physical address that @p virt_addr_ret
 * maps to, or @c UINT64_MAX if it is not mapped.
 */
typedef struct drgn_error *
(pgtable_iterator_next_fn)(struct drgn_program *prog,
			   struct pgtable_iterator *it, uint64_t *virt_addr_ret,
			   uint64_t *phys_addr_ret);
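/*
 * Example (illustrative sketch): a caller can translate every address in
 * [start, end) by repeatedly invoking the architecture's
 * pgtable_iterator_next_fn (called `next` here for illustration); each call
 * reports one mapped page, gap, or invalid range:
 *
 *	it->virt_addr = start;
 *	while (it->virt_addr < end) {
 *		uint64_t range_start, phys;
 *		struct drgn_error *err = next(prog, it, &range_start, &phys);
 *		if (err)
 *			return err;
 *		// [range_start, it->virt_addr) is one range; phys == UINT64_MAX
 *		// means that it is not mapped.
 *	}
 */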
struct drgn_architecture_info {
	const char *name;
	enum drgn_architecture arch;
	enum drgn_platform_flags default_flags;
	/* API-visible registers. */
	const struct drgn_register *registers;
	/* Number of API-visible registers. */
	size_t num_registers;
	/*
	 * Return the API-visible register with the given name, or @c NULL if it
	 * is not recognized.
	 */
	const struct drgn_register *(*register_by_name)(const char *name);
	/* Internal register layouts indexed by internal register number. */
	const struct drgn_register_layout *register_layout;
	/*
	 * Return the internal register number for the given DWARF register
	 * number, or @ref DRGN_REGISTER_NUMBER_UNKNOWN if it is not recognized.
	 */
	drgn_register_number (*dwarf_regno_to_internal)(uint64_t);
	/* CFI row containing default rules for DWARF CFI. */
	const struct drgn_cfi_row *default_dwarf_cfi_row;
	/* Convert an ORC unwind entry to a CFI row. */
	struct drgn_error *(*orc_to_cfi)(const struct drgn_orc_entry *,
					 struct drgn_cfi_row **, bool *,
					 drgn_register_number *);
	/*
	 * Try to unwind a stack frame if CFI wasn't found. Returns &drgn_stop
	 * if we couldn't.
	 */
	struct drgn_error *(*fallback_unwind)(struct drgn_program *,
					      struct drgn_register_state *,
					      struct drgn_register_state **);
	/* Remove any return address signing (e.g., AArch64 pointer authentication). */
	void (*demangle_return_address)(struct drgn_program *,
					struct drgn_register_state *,
					drgn_register_number);
	/* Get the initial registers from pt_regs given as a value buffer object. */
	struct drgn_error *(*pt_regs_get_initial_registers)(const struct drgn_object *,
							    struct drgn_register_state **);
	/* Get the initial registers from an NT_PRSTATUS note. */
	struct drgn_error *(*prstatus_get_initial_registers)(struct drgn_program *,
							      const void *,
							      size_t,
							      struct drgn_register_state **);
	/* Get the initial registers of a Linux kernel task given its task_struct object. */
	struct drgn_error *(*linux_kernel_get_initial_registers)(const struct drgn_object *,
								  struct drgn_register_state **);
	/* Apply an ELF relocation for this architecture. */
	apply_elf_reloc_fn *apply_elf_reloc;
	/* Fallback for finding the direct mapping of a live Linux kernel. */
	struct drgn_error *(*linux_kernel_live_direct_mapping_fallback)(struct drgn_program *,
									uint64_t *,
									uint64_t *);
	/* Allocate a Linux kernel page table iterator. */
	struct drgn_error *(*linux_kernel_pgtable_iterator_create)(struct drgn_program *,
								    struct pgtable_iterator **);
	/* Destroy a Linux kernel page table iterator. */
	void (*linux_kernel_pgtable_iterator_destroy)(struct pgtable_iterator *);
	/* (Re)initialize a Linux kernel page table iterator. */
	void (*linux_kernel_pgtable_iterator_init)(struct drgn_program *,
						   struct pgtable_iterator *);
	/* Iterate a (user or kernel) page table in the Linux kernel. */
	pgtable_iterator_next_fn *linux_kernel_pgtable_iterator_next;
};
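/*
 * Example (illustrative sketch, not a real drgn architecture): a minimal
 * definition only needs the generic fields, and callbacks that an architecture
 * does not support can be left NULL:
 *
 *	const struct drgn_architecture_info arch_info_example = {
 *		.name = "example",
 *		.arch = DRGN_ARCH_UNKNOWN,
 *		.register_by_name = drgn_register_by_name_unknown,
 *	};
 */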
const struct drgn_register *drgn_register_by_name_unknown(const char *name);
extern const struct drgn_architecture_info arch_info_unknown;
extern const struct drgn_architecture_info arch_info_x86_64;
extern const struct drgn_architecture_info arch_info_i386;
extern const struct drgn_architecture_info arch_info_aarch64;
extern const struct drgn_architecture_info arch_info_arm;
extern const struct drgn_architecture_info arch_info_ppc64;
extern const struct drgn_architecture_info arch_info_riscv64;
extern const struct drgn_architecture_info arch_info_riscv32;
struct drgn_platform {
	const struct drgn_architecture_info *arch;
	enum drgn_platform_flags flags;
};
static inline bool
drgn_platform_is_little_endian(const struct drgn_platform *platform)
{
	return platform->flags & DRGN_PLATFORM_IS_LITTLE_ENDIAN;
}
static inline bool drgn_platform_bswap(const struct drgn_platform *platform)
{
	return drgn_platform_is_little_endian(platform) != HOST_LITTLE_ENDIAN;
}
static inline bool drgn_platform_is_64_bit(const struct drgn_platform *platform)
{
	return platform->flags & DRGN_PLATFORM_IS_64_BIT;
}
static inline uint8_t
drgn_platform_address_size(const struct drgn_platform *platform)
{
	return drgn_platform_is_64_bit(platform) ? 8 : 4;
}
static inline uint64_t
drgn_platform_address_mask(const struct drgn_platform *platform)
{
	return drgn_platform_is_64_bit(platform) ? UINT64_MAX : UINT32_MAX;
}
/**
 * Initialize a @ref drgn_platform from an architecture, word size, and
 * endianness.
 *
 * The default flags for the architecture are used other than the word size and
 * endianness.
 */
void drgn_platform_from_arch(const struct drgn_architecture_info *arch,
			     bool is_64_bit, bool is_little_endian,
			     struct drgn_platform *ret);
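/*
 * Example (illustrative sketch): initializing a platform for little-endian,
 * 64-bit x86-64 using that architecture's default flags:
 *
 *	struct drgn_platform platform;
 *	drgn_platform_from_arch(&arch_info_x86_64, true, true, &platform);
 */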
/** Initialize a @ref drgn_platform from an ELF header. */
void drgn_platform_from_elf(GElf_Ehdr *ehdr, struct drgn_platform *ret);
#endif /* DRGN_PLATFORM_H */