
Commit c54894c
Merge branch 'for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue changes from Tejun Heo:
 "Nothing exciting.  Most are updates to debug stuff and related fixes.
  Two not-too-critical bugs are fixed - WARN_ON() triggering spuriously
  during cpu offlining and an unlikely lockdep-related oops."

* 'for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  lockdep: fix oops in processing workqueue
  workqueue: skip nr_running sanity check in worker_enter_idle() if trustee is active
  workqueue: Catch more locking problems with flush_work()
  workqueue: change BUG_ON() to WARN_ON()
  trace: Remove unused workqueue tracer
torvalds committed May 23, 2012
2 parents fb09baf + 4d82a1d commit c54894c
Showing 5 changed files with 38 additions and 306 deletions.
18 changes: 18 additions & 0 deletions include/linux/lockdep.h
@@ -157,6 +157,24 @@ struct lockdep_map {
 #endif
 };
 
+static inline void lockdep_copy_map(struct lockdep_map *to,
+                                    struct lockdep_map *from)
+{
+        int i;
+
+        *to = *from;
+        /*
+         * Since the class cache can be modified concurrently we could observe
+         * half pointers (64bit arch using 32bit copy insns). Therefore clear
+         * the caches and take the performance hit.
+         *
+         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
+         * that relies on cache abuse.
+         */
+        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+                to->class_cache[i] = NULL;
+}
+
 /*
  * Every lock has a list of other locks that were taken after it.
  * We only grow the list, never remove from it:
4 changes: 3 additions & 1 deletion kernel/timer.c
@@ -1108,7 +1108,9 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
          * warnings as well as problems when looking into
          * timer->lockdep_map, make a copy and use that here.
          */
-        struct lockdep_map lockdep_map = timer->lockdep_map;
+        struct lockdep_map lockdep_map;
+
+        lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
 #endif
         /*
          * Couple the lock chain with the lock chain at
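For context, a hedged sketch of the surrounding call_timer_fn() pattern that this copy enables (paraphrased, not the exact kernel source; the wrapper name and the omitted tracepoints are illustrative). The lockdep annotations are taken against the on-stack copy, so lock_map_release() remains safe even if fn() frees the timer along with its embedded lockdep_map:

#include <linux/lockdep.h>
#include <linux/timer.h>

/* Illustrative wrapper name; paraphrases the shape of call_timer_fn(). */
static void call_timer_fn_sketch(struct timer_list *timer,
                                 void (*fn)(unsigned long), unsigned long data)
{
#ifdef CONFIG_LOCKDEP
        /* On-stack copy: it outlives the timer even if fn() frees it. */
        struct lockdep_map lockdep_map;

        lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
        lock_map_acquire(&lockdep_map);
        fn(data);                        /* the timer may free itself in here */
        lock_map_release(&lockdep_map);  /* must not touch timer->lockdep_map */
}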
1 change: 0 additions & 1 deletion kernel/trace/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
 ifeq ($(CONFIG_BLOCK),y)
 obj-$(CONFIG_EVENT_TRACING) += blktrace.o
300 changes: 0 additions & 300 deletions kernel/trace/trace_workqueue.c

This file was deleted.

21 changes: 17 additions & 4 deletions kernel/workqueue.c
@@ -1032,7 +1032,10 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
         cwq = get_cwq(gcwq->cpu, wq);
         trace_workqueue_queue_work(cpu, cwq, work);
 
-        BUG_ON(!list_empty(&work->entry));
+        if (WARN_ON(!list_empty(&work->entry))) {
+                spin_unlock_irqrestore(&gcwq->lock, flags);
+                return;
+        }
 
         cwq->nr_in_flight[cwq->work_color]++;
         work_flags = work_color_to_flags(cwq->work_color);
@@ -1210,8 +1213,13 @@ static void worker_enter_idle(struct worker *worker)
         } else
                 wake_up_all(&gcwq->trustee_wait);
 
-        /* sanity check nr_running */
-        WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
+        /*
+         * Sanity check nr_running. Because trustee releases gcwq->lock
+         * between setting %WORKER_ROGUE and zapping nr_running, the
+         * warning may trigger spuriously. Check iff trustee is idle.
+         */
+        WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
+                     gcwq->nr_workers == gcwq->nr_idle &&
                      atomic_read(get_gcwq_nr_running(gcwq->cpu)));
 }

@@ -1810,7 +1818,9 @@ __acquires(&gcwq->lock)
          * lock freed" warnings as well as problems when looking into
          * work->lockdep_map, make a copy and use that here.
          */
-        struct lockdep_map lockdep_map = work->lockdep_map;
+        struct lockdep_map lockdep_map;
+
+        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 #endif
         /*
          * A single work shouldn't be executed concurrently by
@@ -2506,6 +2516,9 @@ bool flush_work(struct work_struct *work)
 {
         struct wq_barrier barr;
 
+        lock_map_acquire(&work->lockdep_map);
+        lock_map_release(&work->lockdep_map);
+
         if (start_flush_work(work, &barr, true)) {
                 wait_for_completion(&barr.done);
                 destroy_work_on_stack(&barr.work);
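The two lines added to flush_work() make lockdep record an acquire/release of the work item's pseudo-lock on every flush, whether or not the work is running at that moment. A minimal, hypothetical module-style sketch of the kind of pattern this is meant to flag (demo_lock, demo_work, and the function names are made up for illustration): the work function takes a mutex that the flusher already holds, which can deadlock if the work is queued but not yet executing.

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(demo_lock);         /* hypothetical lock */
static struct work_struct demo_work;    /* hypothetical work item */

static void demo_work_fn(struct work_struct *work)
{
        mutex_lock(&demo_lock);
        /* ... touch state protected by demo_lock ... */
        mutex_unlock(&demo_lock);
}

static int __init demo_init(void)
{
        INIT_WORK(&demo_work, demo_work_fn);
        schedule_work(&demo_work);
        return 0;
}

static void __exit demo_exit(void)
{
        mutex_lock(&demo_lock);
        /*
         * demo_work_fn() also takes demo_lock, so flushing here can
         * deadlock if the work is queued but not yet running.  The
         * unconditional lock_map_acquire()/lock_map_release() pair in
         * flush_work() lets lockdep report the inversion even when
         * demo_work happens to be idle at the moment of the flush.
         */
        flush_work(&demo_work);
        mutex_unlock(&demo_lock);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");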
