kernel: remove fastcall in kernel/*
[[email protected]: coding-style fixes]
Signed-off-by: Harvey Harrison <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
hharrison authored and Linus Torvalds committed Feb 8, 2008
1 parent fc9b52c commit 7ad5b3a
Showing 12 changed files with 67 additions and 68 deletions.
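The change is mechanical: fastcall only ever meant anything on 32-bit x86, where it expanded to a regparm(3) attribute, and by this point the x86 kernel was already built with -mregparm=3 for every function, while the other architectures defined the macro away. A rough sketch of how the annotation was wired up around this era (an illustration of the idea, not the verbatim kernel headers):

/*
 * Illustrative sketch only, not the exact header contents.  On 32-bit x86
 * the annotation asked for up to three arguments in registers; the generic
 * fallback made it a no-op everywhere else.  With the whole x86 build already
 * using -mregparm=3, the per-function marker no longer changed generated
 * code, which is why it can simply be deleted from kernel/*.
 */
#ifdef CONFIG_X86_32
# define fastcall __attribute__((regparm(3)))
#else
# define fastcall		/* no-op on other architectures */
#endif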
4 changes: 2 additions & 2 deletions kernel/exit.c
@@ -458,7 +458,7 @@ struct files_struct *get_files_struct(struct task_struct *task)
return files;
}

-void fastcall put_files_struct(struct files_struct *files)
+void put_files_struct(struct files_struct *files)
{
struct fdtable *fdt;

@@ -887,7 +887,7 @@ static inline void exit_child_reaper(struct task_struct *tsk)
zap_pid_ns_processes(tsk->nsproxy->pid_ns);
}

-fastcall NORET_TYPE void do_exit(long code)
+NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
int group_dead;
2 changes: 1 addition & 1 deletion kernel/fork.c
@@ -390,7 +390,7 @@ struct mm_struct * mm_alloc(void)
* is dropped: either by a lazy thread or by
* mmput. Free the page directory and the mm.
*/
-void fastcall __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
10 changes: 5 additions & 5 deletions kernel/irq/chip.c
@@ -286,7 +286,7 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
* Note: The caller is expected to handle the ack, clear, mask and
* unmask issues if necessary.
*/
-void fastcall
+void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
struct irqaction *action;
@@ -327,7 +327,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
* it after the associated handler has acknowledged the device, so the
* interrupt line is back to inactive.
*/
-void fastcall
+void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int cpu = smp_processor_id();
@@ -375,7 +375,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
* for modern forms of interrupt handlers, which handle the flow
* details in hardware, transparently.
*/
-void fastcall
+void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int cpu = smp_processor_id();
@@ -434,7 +434,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
* the handler was running. If all pending interrupts are handled, the
* loop is left.
*/
-void fastcall
+void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
const unsigned int cpu = smp_processor_id();
@@ -505,7 +505,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
*
* Per CPU interrupts on SMP machines without locking requirements
*/
-void fastcall
+void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
irqreturn_t action_ret;
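The five handlers above are genirq flow handlers; dropping fastcall leaves the (unsigned int irq, struct irq_desc *desc) signature that the core and the architectures rely on untouched. For context, a minimal sketch of how a flow handler with this signature is typically attached to an interrupt line. The irq number and chip below are hypothetical placeholders; set_irq_chip_and_handler() is the genirq call of this era:

#include <linux/init.h>
#include <linux/irq.h>

#define MY_IRQ	42			/* hypothetical interrupt number */
static struct irq_chip my_irq_chip;	/* assumed to be filled in elsewhere */

static void __init my_board_init_irq(void)
{
	/* level-triggered line: ack/mask handling follows handle_level_irq() */
	set_irq_chip_and_handler(MY_IRQ, &my_irq_chip, handle_level_irq);
}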
4 changes: 2 additions & 2 deletions kernel/irq/handle.c
@@ -25,7 +25,7 @@
*
* Handles spurious and unhandled IRQ's. It also prints a debugmessage.
*/
-void fastcall
+void
handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
print_irq_desc(irq, desc);
@@ -163,7 +163,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
* This is the original x86 implementation which is used for every
* interrupt type.
*/
-fastcall unsigned int __do_IRQ(unsigned int irq)
+unsigned int __do_IRQ(unsigned int irq)
{
struct irq_desc *desc = irq_desc + irq;
struct irqaction *action;
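__do_IRQ() is the legacy catch-all entry point used by architectures that have not switched to the per-IRQ flow handlers from kernel/irq/chip.c; it keeps its signature, minus the annotation. A sketch of the usual shape of an architecture's dispatch into it, where arch_handle_irq() is a made-up name for illustration and the helpers around it are real:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq_regs.h>

asmlinkage void arch_handle_irq(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();			/* account for hardirq context */
	__do_IRQ(irq);			/* walk the irqaction chain for this line */
	irq_exit();			/* run softirqs if this was the outermost irq */

	set_irq_regs(old_regs);
}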
2 changes: 1 addition & 1 deletion kernel/mutex-debug.c
@@ -107,7 +107,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
* use of the mutex is forbidden. The mutex must not be locked when
* this function is called.
*/
-void fastcall mutex_destroy(struct mutex *lock)
+void mutex_destroy(struct mutex *lock)
{
DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
lock->magic = NULL;
29 changes: 14 additions & 15 deletions kernel/mutex.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(__mutex_init);
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
-static void fastcall noinline __sched
+static void noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
@@ -82,7 +82,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
*
* This function is similar to (but not equivalent to) down().
*/
-void inline fastcall __sched mutex_lock(struct mutex *lock)
+void inline __sched mutex_lock(struct mutex *lock)
{
might_sleep();
/*
@@ -95,8 +95,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
EXPORT_SYMBOL(mutex_lock);
#endif

-static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count);
+static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
* mutex_unlock - release the mutex
@@ -109,7 +108,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count);
*
* This function is similar to (but not equivalent to) up().
*/
-void fastcall __sched mutex_unlock(struct mutex *lock)
+void __sched mutex_unlock(struct mutex *lock)
{
/*
* The unlocking fastpath is the 0->1 transition from 'locked'
@@ -234,7 +233,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
/*
* Release the lock, slowpath:
*/
-static fastcall inline void
+static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -271,7 +270,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
/*
* Release the lock, slowpath:
*/
-static fastcall noinline void
+static noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
__mutex_unlock_common_slowpath(lock_count, 1);
@@ -282,10 +281,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
* Here come the less common (and hence less performance-critical) APIs:
* mutex_lock_interruptible() and mutex_trylock().
*/
-static int fastcall noinline __sched
+static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

-static noinline int fastcall __sched
+static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
@@ -299,7 +298,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
*
* This function is similar to (but not equivalent to) down_interruptible().
*/
-int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+int __sched mutex_lock_interruptible(struct mutex *lock)
{
might_sleep();
return __mutex_fastpath_lock_retval
@@ -308,31 +307,31 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)

EXPORT_SYMBOL(mutex_lock_interruptible);

-int fastcall __sched mutex_lock_killable(struct mutex *lock)
+int __sched mutex_lock_killable(struct mutex *lock)
{
might_sleep();
return __mutex_fastpath_lock_retval
(&lock->count, __mutex_lock_killable_slowpath);
}
EXPORT_SYMBOL(mutex_lock_killable);

-static void fastcall noinline __sched
+static noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);

__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

-static int fastcall noinline __sched
+static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);

return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

-static noinline int fastcall __sched
+static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -381,7 +380,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
* This function must not be used in interrupt context. The
* mutex must be released by the same task that acquired it.
*/
-int fastcall __sched mutex_trylock(struct mutex *lock)
+int __sched mutex_trylock(struct mutex *lock)
{
return __mutex_fastpath_trylock(&lock->count,
__mutex_trylock_slowpath);
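Only the calling-convention annotation changes for the mutex entry points; the semantics spelled out in the comments (a sleeping lock, similar to but not equivalent to down()/up(), not usable from interrupt context) stay the same. A minimal usage sketch of the calls whose declarations are edited above, with a hypothetical lock and caller:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);		/* hypothetical lock */

static int my_update(void)
{
	/* interruptible variant: fails with -EINTR if a signal arrives while sleeping */
	if (mutex_lock_interruptible(&my_lock))
		return -EINTR;

	/* ... critical section ... */

	mutex_unlock(&my_lock);
	return 0;
}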
18 changes: 9 additions & 9 deletions kernel/pid.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(is_container_init);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

-static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
+static void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
int offset = pid & BITS_PER_PAGE_MASK;
@@ -198,7 +198,7 @@ int next_pidmap(struct pid_namespace *pid_ns, int last)
return -1;
}

-fastcall void put_pid(struct pid *pid)
+void put_pid(struct pid *pid)
{
struct pid_namespace *ns;

@@ -220,7 +220,7 @@ static void delayed_put_pid(struct rcu_head *rhp)
put_pid(pid);
}

-fastcall void free_pid(struct pid *pid)
+void free_pid(struct pid *pid)
{
/* We can be called with write_lock_irq(&tasklist_lock) held */
int i;
@@ -286,7 +286,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
goto out;
}

-struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
+struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
struct hlist_node *elem;
struct upid *pnr;
@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(find_pid);
/*
* attach_pid() must be called with the tasklist_lock write-held.
*/
-int fastcall attach_pid(struct task_struct *task, enum pid_type type,
+int attach_pid(struct task_struct *task, enum pid_type type,
struct pid *pid)
{
struct pid_link *link;
@@ -328,7 +328,7 @@ int fastcall attach_pid(struct task_struct *task, enum pid_type type,
return 0;
}

-void fastcall detach_pid(struct task_struct *task, enum pid_type type)
+void detach_pid(struct task_struct *task, enum pid_type type)
{
struct pid_link *link;
struct pid *pid;
@@ -348,15 +348,15 @@ void fastcall detach_pid(struct task_struct *task, enum pid_type type)
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
-void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
+void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type type)
{
new->pids[type].pid = old->pids[type].pid;
hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
old->pids[type].pid = NULL;
}

-struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
+struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
struct task_struct *result = NULL;
if (pid) {
@@ -408,7 +408,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
return pid;
}

-struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
+struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
struct task_struct *result;
rcu_read_lock();
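The struct pid helpers here are reference-counted lookup primitives; again, only the annotation goes away. A sketch of the usual get/put pairing built from functions visible in this file, with a hypothetical surrounding function:

#include <linux/pid.h>
#include <linux/sched.h>

static void inspect_current_pid(void)
{
	struct pid *pid = get_task_pid(current, PIDTYPE_PID);
	struct task_struct *task;

	task = get_pid_task(pid, PIDTYPE_PID);	/* takes a task reference, may return NULL */
	if (task) {
		/* ... use task ... */
		put_task_struct(task);
	}
	put_pid(pid);				/* drop the reference taken by get_task_pid() */
}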
16 changes: 8 additions & 8 deletions kernel/sched.c
@@ -1893,13 +1893,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
return success;
}

-int fastcall wake_up_process(struct task_struct *p)
+int wake_up_process(struct task_struct *p)
{
return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);

-int fastcall wake_up_state(struct task_struct *p, unsigned int state)
+int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
}
@@ -1986,7 +1986,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
-void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
unsigned long flags;
struct rq *rq;
@@ -3753,7 +3753,7 @@ void scheduler_tick(void)

#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)

-void fastcall add_preempt_count(int val)
+void add_preempt_count(int val)
{
/*
* Underflow?
@@ -3769,7 +3769,7 @@ void fastcall add_preempt_count(int val)
}
EXPORT_SYMBOL(add_preempt_count);

-void fastcall sub_preempt_count(int val)
+void sub_preempt_count(int val)
{
/*
* Underflow?
@@ -4067,7 +4067,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: is directly passed to the wakeup function
*/
-void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
{
unsigned long flags;
@@ -4081,7 +4081,7 @@ EXPORT_SYMBOL(__wake_up);
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
-void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
__wake_up_common(q, mode, 1, 0, NULL);
}
@@ -4099,7 +4099,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
*
* On UP it can prevent extra preemption.
*/
-void fastcall
+void
__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
unsigned long flags;
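The scheduler symbols touched here sit behind the wait-queue wake_up*() macros and the wake_up_process()/wake_up_state() exports; their behaviour is unchanged. A small sketch of the sleeper/waker pattern that ends up in the __wake_up() path edited above, with a hypothetical wait queue, flag, and pair of functions:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);	/* hypothetical wait queue */
static int my_condition;

static int wait_for_my_event(void)
{
	/* sleeps until my_condition becomes non-zero, or a signal arrives */
	return wait_event_interruptible(my_waitq, my_condition);
}

static void signal_my_event(void)
{
	my_condition = 1;
	wake_up(&my_waitq);	/* macro that expands to a __wake_up() call */
}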
