Merge branch 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue changes from Tejun Heo:
 "This is workqueue updates for v3.7-rc1.  A lot of activities this
  round including considerable API and behavior cleanups.

   * delayed_work combines a timer and a work item.  The handling of the
     timer part has always been a bit clunky, leading to a confusing
     cancellation API with weird corner-case behaviors.  delayed_work is
     updated to use the new IRQ-safe timer, and cancellation now works
     as expected.

   * Another deficiency of delayed_work was the lack of a counterpart to
     mod_timer(), which led to cancel+queue combinations or open-coded
     timer+work usages.  mod_delayed_work[_on]() are added (a usage
     sketch follows below).

     Together, these two delayed_work changes give delayed_work the
     interface and behavior of a timer that executes in process context.

   * A work item could previously be executed concurrently on multiple
     CPUs, which is rather unintuitive and made flush_work() behavior
     confusing and half-broken under certain circumstances.  This
     problem doesn't exist for non-reentrant workqueues.  While the
     non-reentrancy check isn't free, the overhead is incurred only when
     a work item bounces across different CPUs, and even in a simulated
     pathological scenario it isn't too high.

     All workqueues are made non-reentrant.  This removes the
     distinction between flush_[delayed_]work() and
     flush_[delayed_]work_sync(): the former is now as strong as the
     latter, and on return the specified work item is guaranteed to have
     finished executing any queueing issued before the call.

   * In addition to the various bug fixes, Lai redid and simplified CPU
     hotplug handling significantly.

   * Joonsoo introduced system_highpri_wq and used it during CPU
     hotplug.

  There are two merge commits - one to pull in the IRQ-safe timer from
  tip/timers/core and the other to pull in CPU hotplug fixes from
  wq/for-3.6-fixes, as Lai's hotplug restructuring depended on them."

Fixed a number of trivial conflicts, but the more interesting conflicts
were silent ones where new code added during the merge window had used
the deprecated interfaces and thus didn't cause any real data conflicts.

Tejun pointed out a few of them; I fixed a couple more.
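
To make the API shift described above concrete, here is a minimal usage
sketch (illustration only, not code from this merge; the example_* names
are hypothetical): mod_delayed_work() replaces the old
"__cancel_delayed_work() + queue_delayed_work()" pairs, and with all
workqueues now non-reentrant, flush_delayed_work() suffices where
flush_delayed_work_sync() used to be required.

  #include <linux/workqueue.h>
  #include <linux/jiffies.h>

  static void example_poll_fn(struct work_struct *work);
  static DECLARE_DELAYED_WORK(example_poll_work, example_poll_fn);

  static void example_poll_fn(struct work_struct *work)
  {
          /* runs in process context once the delay expires */
  }

  /* Re-arm the poll, mod_timer()-style: one call replaces the old
   * cancel + queue sequence and works whether or not the work item
   * is already pending. */
  static void example_rearm_poll(unsigned long delay_ms)
  {
          mod_delayed_work(system_wq, &example_poll_work,
                           msecs_to_jiffies(delay_ms));
  }

  /* On teardown, flush_delayed_work() is now as strong as the
   * deprecated flush_delayed_work_sync(): any earlier queueing has
   * finished executing by the time it returns. */
  static void example_teardown(void)
  {
          flush_delayed_work(&example_poll_work);
  }

The conversions in the diff below (e.g. block/blk-core.c, block/genhd.c
and drivers/infiniband/core/addr.c) follow exactly this pattern.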

* 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (46 commits)
  workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
  workqueue: use cwq_set_max_active() helper for workqueue_set_max_active()
  workqueue: introduce cwq_set_max_active() helper for thaw_workqueues()
  workqueue: remove @delayed from cwq_dec_nr_in_flight()
  workqueue: fix possible stall on try_to_grab_pending() of a delayed work item
  workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
  workqueue: use __cpuinit instead of __devinit for cpu callbacks
  workqueue: rename manager_mutex to assoc_mutex
  workqueue: WORKER_REBIND is no longer necessary for idle rebinding
  workqueue: WORKER_REBIND is no longer necessary for busy rebinding
  workqueue: reimplement idle worker rebinding
  workqueue: deprecate __cancel_delayed_work()
  workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
  workqueue: use mod_delayed_work() instead of __cancel + queue
  workqueue: use irqsafe timer for delayed_work
  workqueue: clean up delayed_work initializers and add missing one
  workqueue: make deferrable delayed_work initializer names consistent
  workqueue: cosmetic whitespace updates for macro definitions
  workqueue: deprecate system_nrt[_freezable]_wq
  workqueue: deprecate flush[_delayed]_work_sync()
  ...
torvalds committed Oct 2, 2012
2 parents 974a847 + 7c6e72e commit 033d995
Showing 134 changed files with 1,047 additions and 1,054 deletions.
4 changes: 2 additions & 2 deletions arch/arm/mach-pxa/sharpsl_pm.c
@@ -579,8 +579,8 @@ static int sharpsl_ac_check(void)
 static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
 {
         sharpsl_pm.flags |= SHARPSL_SUSPENDED;
-        flush_delayed_work_sync(&toggle_charger);
-        flush_delayed_work_sync(&sharpsl_bat);
+        flush_delayed_work(&toggle_charger);
+        flush_delayed_work(&sharpsl_bat);
 
         if (sharpsl_pm.charge_mode == CHRG_ON)
                 sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
2 changes: 1 addition & 1 deletion arch/arm/plat-omap/mailbox.c
@@ -310,7 +310,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
         omap_mbox_disable_irq(mbox, IRQ_RX);
         free_irq(mbox->irq, mbox);
         tasklet_kill(&mbox->txq->tasklet);
-        flush_work_sync(&mbox->rxq->work);
+        flush_work(&mbox->rxq->work);
         mbox_queue_free(mbox->txq);
         mbox_queue_free(mbox->rxq);
 }
2 changes: 1 addition & 1 deletion arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -76,7 +76,7 @@ static void spu_gov_work(struct work_struct *work)
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
         int delay = usecs_to_jiffies(info->poll_int);
-        INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
+        INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
         schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
2 changes: 1 addition & 1 deletion arch/sh/drivers/push-switch.c
@@ -107,7 +107,7 @@ static int switch_drv_remove(struct platform_device *pdev)
         device_remove_file(&pdev->dev, &dev_attr_switch);
 
         platform_set_drvdata(pdev, NULL);
-        flush_work_sync(&psw->work);
+        flush_work(&psw->work);
         del_timer_sync(&psw->debounce);
         free_irq(irq, pdev);
 
8 changes: 3 additions & 5 deletions block/blk-core.c
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(blk_start_queue);
 **/
 void blk_stop_queue(struct request_queue *q)
 {
-        __cancel_delayed_work(&q->delay_work);
+        cancel_delayed_work(&q->delay_work);
         queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-        if (likely(!blk_queue_stopped(q))) {
-                __cancel_delayed_work(&q->delay_work);
-                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-        }
+        if (likely(!blk_queue_stopped(q)))
+                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
14 changes: 4 additions & 10 deletions block/blk-throttle.c
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ static void tg_stats_alloc_fn(struct work_struct *work)
                 stats_cpu = alloc_percpu(struct tg_stats_cpu);
                 if (!stats_cpu) {
                         /* allocation failed, try again after some time */
-                        queue_delayed_work(system_nrt_wq, dwork,
-                                           msecs_to_jiffies(10));
+                        schedule_delayed_work(dwork, msecs_to_jiffies(10));
                         return;
                 }
         }
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
          */
         spin_lock_irqsave(&tg_stats_alloc_lock, flags);
         list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-        queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+        schedule_delayed_work(&tg_stats_alloc_work, 0);
         spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
@@ -930,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
         /* schedule work if limits changed even if no bio is queued */
         if (total_nr_queued(td) || td->limits_changed) {
-                /*
-                 * We might have a work scheduled to be executed in future.
-                 * Cancel that and schedule a new one.
-                 */
-                __cancel_delayed_work(dwork);
-                queue_delayed_work(kthrotld_workqueue, dwork, delay);
+                mod_delayed_work(kthrotld_workqueue, dwork, delay);
                 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                                 delay, jiffies);
         }
14 changes: 6 additions & 8 deletions block/genhd.c
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
         intv = disk_events_poll_jiffies(disk);
         set_timer_slack(&ev->dwork.timer, intv / 4);
         if (check_now)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
         else if (intv)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
         spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1534,10 +1534,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 
         spin_lock_irq(&ev->lock);
         ev->clearing |= mask;
-        if (!ev->block) {
-                cancel_delayed_work(&ev->dwork);
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-        }
+        if (!ev->block)
+                mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
         spin_unlock_irq(&ev->lock);
 }
 
@@ -1573,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
         /* uncondtionally schedule event check and wait for it to finish */
         disk_block_events(disk);
-        queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+        queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
         flush_delayed_work(&ev->dwork);
         __disk_unblock_events(disk, false);
 
@@ -1610,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
         intv = disk_events_poll_jiffies(disk);
         if (!ev->block && intv)
-                queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+                queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
         spin_unlock_irq(&ev->lock);
 
5 changes: 2 additions & 3 deletions drivers/block/floppy.c
@@ -672,15 +672,14 @@ static void __reschedule_timeout(int drive, const char *message)
 
         if (drive == current_reqD)
                 drive = current_drive;
-        __cancel_delayed_work(&fd_timeout);
 
         if (drive < 0 || drive >= N_DRIVE) {
                 delay = 20UL * HZ;
                 drive = 0;
         } else
                 delay = UDP->timeout;
 
-        queue_delayed_work(floppy_wq, &fd_timeout, delay);
+        mod_delayed_work(floppy_wq, &fd_timeout, delay);
         if (UDP->flags & FD_DEBUG)
                 DPRINT("reschedule timeout %s\n", message);
         timeout_message = message;
@@ -891,7 +890,7 @@ static void unlock_fdc(void)
 
         raw_cmd = NULL;
         command_status = FD_COMMAND_NONE;
-        __cancel_delayed_work(&fd_timeout);
+        cancel_delayed_work(&fd_timeout);
         do_floppy = NULL;
         cont = NULL;
         clear_bit(0, &fdc_busy);
4 changes: 2 additions & 2 deletions drivers/block/xen-blkfront.c
@@ -670,7 +670,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
         spin_unlock_irqrestore(&info->io_lock, flags);
 
         /* Flush gnttab callback work. Must be done with no locks held. */
-        flush_work_sync(&info->work);
+        flush_work(&info->work);
 
         del_gendisk(info->gd);
 
@@ -719,7 +719,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
         spin_unlock_irq(&info->io_lock);
 
         /* Flush gnttab callback work. Must be done with no locks held. */
-        flush_work_sync(&info->work);
+        flush_work(&info->work);
 
         /* Free resources associated with old device channel. */
         if (info->ring_ref != GRANT_INVALID_REF) {
2 changes: 1 addition & 1 deletion drivers/cdrom/gdrom.c
@@ -840,7 +840,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
 
 static int __devexit remove_gdrom(struct platform_device *devptr)
 {
-        flush_work_sync(&work);
+        flush_work(&work);
         blk_cleanup_queue(gd.gdrom_rq);
         free_irq(HW_EVENT_GDROM_CMD, &gd);
         free_irq(HW_EVENT_GDROM_DMA, &gd);
2 changes: 1 addition & 1 deletion drivers/char/sonypi.c
@@ -1433,7 +1433,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
         sonypi_disable();
 
         synchronize_irq(sonypi_device.irq);
-        flush_work_sync(&sonypi_device.input_work);
+        flush_work(&sonypi_device.input_work);
 
         if (useinput) {
                 input_unregister_device(sonypi_device.input_key_dev);
4 changes: 2 additions & 2 deletions drivers/char/tpm/tpm.c
@@ -1172,7 +1172,7 @@ int tpm_release(struct inode *inode, struct file *file)
         struct tpm_chip *chip = file->private_data;
 
         del_singleshot_timer_sync(&chip->user_read_timer);
-        flush_work_sync(&chip->work);
+        flush_work(&chip->work);
         file->private_data = NULL;
         atomic_set(&chip->data_pending, 0);
         kfree(chip->data_buffer);
@@ -1225,7 +1225,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
         int rc;
 
         del_singleshot_timer_sync(&chip->user_read_timer);
-        flush_work_sync(&chip->work);
+        flush_work(&chip->work);
         ret_size = atomic_read(&chip->data_pending);
         atomic_set(&chip->data_pending, 0);
         if (ret_size > 0) { /* relay data */
2 changes: 1 addition & 1 deletion drivers/cpufreq/cpufreq_conservative.c
@@ -466,7 +466,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
                 delay -= jiffies % delay;
 
         dbs_info->enable = 1;
-        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+        INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
         schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
2 changes: 1 addition & 1 deletion drivers/cpufreq/cpufreq_ondemand.c
@@ -644,7 +644,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
                 delay -= jiffies % delay;
 
         dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+        INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
         schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
2 changes: 1 addition & 1 deletion drivers/devfreq/devfreq.c
@@ -607,7 +607,7 @@ static int __init devfreq_start_polling(void)
         mutex_lock(&devfreq_list_lock);
         polling = false;
         devfreq_wq = create_freezable_workqueue("devfreq_wq");
-        INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
+        INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
         mutex_unlock(&devfreq_list_lock);
 
         devfreq_monitor(&devfreq_work.work);
17 changes: 1 addition & 16 deletions drivers/edac/edac_mc.c
@@ -559,7 +559,7 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
                 return;
 
         INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-        queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+        mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
 
 /*
@@ -599,21 +599,6 @@ void edac_mc_reset_delay_period(int value)
 
         mutex_lock(&mem_ctls_mutex);
 
-        /* scan the list and turn off all workq timers, doing so under lock
-         */
-        list_for_each(item, &mc_devices) {
-                mci = list_entry(item, struct mem_ctl_info, link);
-
-                if (mci->op_state == OP_RUNNING_POLL)
-                        cancel_delayed_work(&mci->work);
-        }
-
-        mutex_unlock(&mem_ctls_mutex);
-
-
-        /* re-walk the list, and reset the poll delay */
-        mutex_lock(&mem_ctls_mutex);
-
         list_for_each(item, &mc_devices) {
                 mci = list_entry(item, struct mem_ctl_info, link);
 
2 changes: 1 addition & 1 deletion drivers/extcon/extcon-adc-jack.c
@@ -143,7 +143,7 @@ static int __devinit adc_jack_probe(struct platform_device *pdev)
 
         data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
 
-        INIT_DELAYED_WORK_DEFERRABLE(&data->handler, adc_jack_handler);
+        INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
 
         platform_set_drvdata(pdev, data);
 
6 changes: 3 additions & 3 deletions drivers/gpu/drm/drm_crtc_helper.c
@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
         }
 
         if (repoll)
-                queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+                schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
         }
 
         if (poll)
-                queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+                schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
         /* kill timer and schedule immediate execution, this doesn't block */
         cancel_delayed_work(&dev->mode_config.output_poll_work);
         if (drm_kms_helper_poll)
-                queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+                schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
2 changes: 1 addition & 1 deletion drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -878,7 +878,7 @@ static int g2d_suspend(struct device *dev)
                 /* FIXME: good range? */
                 usleep_range(500, 1000);
 
-        flush_work_sync(&g2d->runqueue_work);
+        flush_work(&g2d->runqueue_work);
 
         return 0;
 }
2 changes: 1 addition & 1 deletion drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -302,7 +302,7 @@ nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
         spin_unlock_irqrestore(&pgpio->lock, flags);
 
         list_for_each_entry_safe(isr, tmp, &tofree, head) {
-                flush_work_sync(&isr->work);
+                flush_work(&isr->work);
                 kfree(isr);
         }
 }
2 changes: 1 addition & 1 deletion drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -277,7 +277,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
                 if (rdev->msi_enabled)
                         pci_disable_msi(rdev->pdev);
         }
-        flush_work_sync(&rdev->hotplug_work);
+        flush_work(&rdev->hotplug_work);
 }
 
 /**
2 changes: 1 addition & 1 deletion drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -594,7 +594,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
         par->dirty.active = false;
         spin_unlock_irqrestore(&par->dirty.lock, flags);
 
-        flush_delayed_work_sync(&info->deferred_work);
+        flush_delayed_work(&info->deferred_work);
 
         par->bo_ptr = NULL;
         ttm_bo_kunmap(&par->map);
2 changes: 1 addition & 1 deletion drivers/hid/hid-picolcd_fb.c
@@ -608,7 +608,7 @@ void picolcd_exit_framebuffer(struct picolcd_data *data)
         /* make sure there is no running update - thus that fbdata->picolcd
          * once obtained under lock is guaranteed not to get free() under
          * the feet of the deferred work */
-        flush_delayed_work_sync(&info->deferred_work);
+        flush_delayed_work(&info->deferred_work);
 
         data->fb_info = NULL;
         unregister_framebuffer(info);
2 changes: 1 addition & 1 deletion drivers/hid/hid-wiimote-ext.c
@@ -229,7 +229,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-        queue_work(system_nrt_wq, &ext->worker);
+        schedule_work(&ext->worker);
 }
 
 /*
4 changes: 1 addition & 3 deletions drivers/infiniband/core/addr.c
@@ -152,13 +152,11 @@ static void set_timeout(unsigned long time)
 {
         unsigned long delay;
 
-        cancel_delayed_work(&work);
-
         delay = time - jiffies;
         if ((long)delay <= 0)
                 delay = 1;
 
-        queue_delayed_work(addr_wq, &work, delay);
+        mod_delayed_work(addr_wq, &work, delay);
 }
 
 static void queue_req(struct addr_req *req)