Skip to content

Commit

Permalink
powerpc/perf: Consolidate perf_callchain_user_[64|32]()
Browse files Browse the repository at this point in the history
perf_callchain_user_64() and perf_callchain_user_32() are nearly
identical. Consolidate into one function with thin wrappers.

Suggested-by: Nicholas Piggin <[email protected]>
Signed-off-by: Michal Suchanek <[email protected]>
[mpe: Adapt to copy_from_user_nofault(), minor formatting]
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
  • Loading branch information
hramrach authored and mpe committed Jul 30, 2020
1 parent a0ff72f commit d3a133a
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 30 deletions.
25 changes: 24 additions & 1 deletion arch/powerpc/perf/callchain.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H

int read_user_stack_slow(void __user *ptr, void *buf, int nb);
int read_user_stack_slow(const void __user *ptr, void *buf, int nb);
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
Expand All @@ -16,4 +16,27 @@ static inline bool invalid_user_sp(unsigned long sp)
return (!sp || (sp & mask) || (sp > top));
}

/*
 * Common helper for reading one naturally aligned word off the user
 * stack.  On 32-bit, simply touching the address lets hash_page set up
 * an HPTE if needed, so no page-table fallback is required; since we
 * run at interrupt level, do_page_fault() will not treat a DSI as a
 * page fault.  On PPC64 a faulting access falls back to
 * read_user_stack_slow().
 */
static inline int __read_user_stack(const void __user *ptr, void *ret,
				    size_t size)
{
	unsigned long uaddr = (unsigned long)ptr;
	int err;

	/* Refuse addresses outside the user range or not size-aligned. */
	if (uaddr > TASK_SIZE - size || (uaddr & (size - 1)))
		return -EFAULT;

	err = copy_from_user_nofault(ret, ptr, size);
	if (err && IS_ENABLED(CONFIG_PPC64))
		err = read_user_stack_slow(ptr, ret, size);

	return err;
}

#endif /* _POWERPC_PERF_CALLCHAIN_H */
21 changes: 2 additions & 19 deletions arch/powerpc/perf/callchain_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,26 +30,9 @@

#endif /* CONFIG_PPC64 */

/*
* On 32-bit we just access the address and let hash_page create a
* HPTE if necessary, so there is no need to fall back to reading
* the page tables. Since this is called at interrupt level,
* do_page_fault() won't treat a DSI as a page fault.
*/
/*
 * Read one 32-bit word from the user stack.  Bounds and alignment
 * checking, plus the PPC64 slow-path fallback, live in the shared
 * __read_user_stack() helper.
 */
static int read_user_stack_32(const unsigned int __user *ptr, unsigned int *ret)
{
	return __read_user_stack(ptr, ret, sizeof(*ret));
}

/*
Expand Down
13 changes: 3 additions & 10 deletions arch/powerpc/perf/callchain_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
* interrupt context, so if the access faults, we read the page tables
* to find which page (if any) is mapped and access it directly.
*/
int read_user_stack_slow(void __user *ptr, void *buf, int nb)
int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
{

unsigned long addr = (unsigned long) ptr;
Expand All @@ -44,16 +44,9 @@ int read_user_stack_slow(void __user *ptr, void *buf, int nb)
return -EFAULT;
}

/*
 * Read one 64-bit word from the user stack.  Bounds and alignment
 * checking, plus the page-table-walk fallback on a faulting access,
 * live in the shared __read_user_stack() helper.
 */
static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
{
	return __read_user_stack(ptr, ret, sizeof(*ret));
}

/*
Expand Down

0 comments on commit d3a133a

Please sign in to comment.