Skip to content

Commit

Permalink
asmlinkage: Add explicit __visible to drivers/*, lib/*, kernel/*
Browse files Browse the repository at this point in the history
As requested by Linus, add an explicit __visible to the asmlinkage users.
This marks the functions as visible to the assembler.

Tree sweep for rest of tree.

Signed-off-by: Andi Kleen <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: H. Peter Anvin <[email protected]>
  • Loading branch information
Andi Kleen authored and H. Peter Anvin committed May 5, 2014
1 parent 2605fc2 commit 722a9f9
Show file tree
Hide file tree
Showing 9 changed files with 16 additions and 16 deletions.
2 changes: 1 addition & 1 deletion drivers/pnp/pnpbios/bioscalls.c
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ __visible struct {
* kernel begins at offset 3GB...
*/

asmlinkage void pnp_bios_callfunc(void);
asmlinkage __visible void pnp_bios_callfunc(void);

__asm__(".text \n"
__ALIGN_STR "\n"
Expand Down
2 changes: 1 addition & 1 deletion init/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -476,7 +476,7 @@ static void __init mm_init(void)
vmalloc_init();
}

asmlinkage void __init start_kernel(void)
asmlinkage __visible void __init start_kernel(void)
{
char * command_line;
extern const struct kernel_param __start___param[], __stop___param[];
Expand Down
2 changes: 1 addition & 1 deletion kernel/context_tracking.c
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ void context_tracking_user_enter(void)
* instead of preempt_schedule() to exit user context if needed before
* calling the scheduler.
*/
asmlinkage void __sched notrace preempt_schedule_context(void)
asmlinkage __visible void __sched notrace preempt_schedule_context(void)
{
enum ctx_state prev_ctx;

Expand Down
2 changes: 1 addition & 1 deletion kernel/locking/lockdep.c
Original file line number Diff line number Diff line change
Expand Up @@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);

asmlinkage void lockdep_sys_exit(void)
asmlinkage __visible void lockdep_sys_exit(void)
{
struct task_struct *curr = current;

Expand Down
2 changes: 1 addition & 1 deletion kernel/power/snapshot.c
Original file line number Diff line number Diff line change
Expand Up @@ -1586,7 +1586,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
return -ENOMEM;
}

asmlinkage int swsusp_save(void)
asmlinkage __visible int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;

Expand Down
4 changes: 2 additions & 2 deletions kernel/printk/printk.c
Original file line number Diff line number Diff line change
Expand Up @@ -1674,7 +1674,7 @@ EXPORT_SYMBOL(printk_emit);
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
asmlinkage int printk(const char *fmt, ...)
asmlinkage __visible int printk(const char *fmt, ...)
{
va_list args;
int r;
Expand Down Expand Up @@ -1737,7 +1737,7 @@ void early_vprintk(const char *fmt, va_list ap)
}
}

asmlinkage void early_printk(const char *fmt, ...)
asmlinkage __visible void early_printk(const char *fmt, ...)
{
va_list ap;

Expand Down
10 changes: 5 additions & 5 deletions kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
asmlinkage void schedule_tail(struct task_struct *prev)
asmlinkage __visible void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
Expand Down Expand Up @@ -2741,7 +2741,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}

asmlinkage void __sched schedule(void)
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;

Expand All @@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void)
EXPORT_SYMBOL(schedule);

#ifdef CONFIG_CONTEXT_TRACKING
asmlinkage void __sched schedule_user(void)
asmlinkage __visible void __sched schedule_user(void)
{
/*
* If we come here after a random call to set_need_resched(),
Expand Down Expand Up @@ -2783,7 +2783,7 @@ void __sched schedule_preempt_disabled(void)
* off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void __sched notrace preempt_schedule(void)
asmlinkage __visible void __sched notrace preempt_schedule(void)
{
/*
* If there is a non-zero preempt_count or interrupts are disabled,
Expand Down Expand Up @@ -2813,7 +2813,7 @@ EXPORT_SYMBOL(preempt_schedule);
* Note, that this is called and return with irqs disabled. This will
* protect us against recursive calling from irq.
*/
asmlinkage void __sched preempt_schedule_irq(void)
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
enum ctx_state prev_state;

Expand Down
4 changes: 2 additions & 2 deletions kernel/softirq.c
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage void __do_softirq(void)
asmlinkage __visible void __do_softirq(void)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
Expand Down Expand Up @@ -299,7 +299,7 @@ asmlinkage void __do_softirq(void)
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage void do_softirq(void)
asmlinkage __visible void do_softirq(void)
{
__u32 pending;
unsigned long flags;
Expand Down
4 changes: 2 additions & 2 deletions lib/dump_stack.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ static void __dump_stack(void)
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);

asmlinkage void dump_stack(void)
asmlinkage __visible void dump_stack(void)
{
int was_locked;
int old;
Expand Down Expand Up @@ -55,7 +55,7 @@ asmlinkage void dump_stack(void)
preempt_enable();
}
#else
asmlinkage void dump_stack(void)
asmlinkage __visible void dump_stack(void)
{
__dump_stack();
}
Expand Down

0 comments on commit 722a9f9

Please sign in to comment.