padata: Remove broken queue flushing

The function padata_flush_queues is fundamentally broken because
it cannot force padata users to complete the request that is
underway.  IOW padata has to passively wait for the completion
of any outstanding work.

As it stands, flushing is used in two places.  Its use in padata_stop
is simply unnecessary because nothing depends on the queues being
flushed afterwards.

The other use in padata_replace is more substantial, as we depend
on it to free the old pd structure.  This patch instead uses
pd->refcnt to free the pd structure dynamically once all requests
are complete.
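The resulting lifetime rule can be sketched in plain C (a minimal
userspace illustration rather than the kernel code: C11 stdatomic
stands in for the kernel's atomic_t, and helpers such as pd_alloc()
and pd_put_many() are invented for this example).  The pd starts
with one reference owned by the instance, each submitted job takes
another, the serial worker drops its whole batch of references at
once, and whoever drops the count to zero frees the pd:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct parallel_data. */
struct pd {
	atomic_int refcnt;
};

static struct pd *pd_alloc(void)
{
	struct pd *pd = calloc(1, sizeof(*pd));
	atomic_init(&pd->refcnt, 1);	/* the instance's own reference */
	return pd;
}

static void pd_free(struct pd *pd)
{
	printf("pd freed\n");
	free(pd);
}

/* Drop n references in one go; free on reaching zero.
 * This is the role atomic_sub_and_test() plays in the patch. */
static void pd_put_many(struct pd *pd, int n)
{
	if (atomic_fetch_sub(&pd->refcnt, n) == n)
		pd_free(pd);
}

int main(void)
{
	struct pd *pd = pd_alloc();
	int jobs = 3;

	/* Submission path: each queued job pins the pd
	 * (in the kernel, padata_do_parallel takes this reference). */
	atomic_fetch_add(&pd->refcnt, jobs);

	/* Serial worker: after completing its batch, drop all of
	 * the batch's references at once. */
	pd_put_many(pd, jobs);

	/* Replace path: drop the instance's initial reference;
	 * it is the last one here, so the pd is freed. */
	pd_put_many(pd, 1);
	return 0;
}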

Fixes: 2b73b07 ("padata: Flush the padata queues actively")
Cc: <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Reviewed-by: Daniel Jordan <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
herbertx committed Dec 11, 2019
1 parent 3f61b05 commit 07928d9
Showing 1 changed file with 12 additions and 31 deletions.
43 changes: 12 additions & 31 deletions kernel/padata.c
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -35,6 +35,8 @@
 
 #define MAX_OBJ_NUM 1000
 
+static void padata_free_pd(struct parallel_data *pd);
+
 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 {
 	int cpu, target_cpu;
@@ -283,6 +285,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
 	struct padata_serial_queue *squeue;
 	struct parallel_data *pd;
 	LIST_HEAD(local_list);
+	int cnt;
 
 	local_bh_disable();
 	squeue = container_of(serial_work, struct padata_serial_queue, work);
@@ -292,6 +295,8 @@ static void padata_serial_worker(struct work_struct *serial_work)
 	list_replace_init(&squeue->serial.list, &local_list);
 	spin_unlock(&squeue->serial.lock);
 
+	cnt = 0;
+
 	while (!list_empty(&local_list)) {
 		struct padata_priv *padata;
 
@@ -301,9 +306,12 @@ static void padata_serial_worker(struct work_struct *serial_work)
 		list_del_init(&padata->list);
 
 		padata->serial(padata);
-		atomic_dec(&pd->refcnt);
+		cnt++;
 	}
 	local_bh_enable();
+
+	if (atomic_sub_and_test(cnt, &pd->refcnt))
+		padata_free_pd(pd);
 }
 
 /**
@@ -440,7 +448,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
 	padata_init_squeues(pd);
 	atomic_set(&pd->seq_nr, -1);
 	atomic_set(&pd->reorder_objects, 0);
-	atomic_set(&pd->refcnt, 0);
+	atomic_set(&pd->refcnt, 1);
 	spin_lock_init(&pd->lock);
 	pd->cpu = cpumask_first(pd->cpumask.pcpu);
 	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
@@ -466,29 +474,6 @@ static void padata_free_pd(struct parallel_data *pd)
 	kfree(pd);
 }
 
-/* Flush all objects out of the padata queues. */
-static void padata_flush_queues(struct parallel_data *pd)
-{
-	int cpu;
-	struct padata_parallel_queue *pqueue;
-	struct padata_serial_queue *squeue;
-
-	for_each_cpu(cpu, pd->cpumask.pcpu) {
-		pqueue = per_cpu_ptr(pd->pqueue, cpu);
-		flush_work(&pqueue->work);
-	}
-
-	if (atomic_read(&pd->reorder_objects))
-		padata_reorder(pd);
-
-	for_each_cpu(cpu, pd->cpumask.cbcpu) {
-		squeue = per_cpu_ptr(pd->squeue, cpu);
-		flush_work(&squeue->work);
-	}
-
-	BUG_ON(atomic_read(&pd->refcnt) != 0);
-}
-
 static void __padata_start(struct padata_instance *pinst)
 {
 	pinst->flags |= PADATA_INIT;
@@ -502,10 +487,6 @@ static void __padata_stop(struct padata_instance *pinst)
 	pinst->flags &= ~PADATA_INIT;
 
 	synchronize_rcu();
-
-	get_online_cpus();
-	padata_flush_queues(pinst->pd);
-	put_online_cpus();
 }
 
 /* Replace the internal control structure with a new one. */
@@ -526,8 +507,8 @@ static void padata_replace(struct padata_instance *pinst,
 	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
 		notification_mask |= PADATA_CPU_SERIAL;
 
-	padata_flush_queues(pd_old);
-	padata_free_pd(pd_old);
+	if (atomic_dec_and_test(&pd_old->refcnt))
+		padata_free_pd(pd_old);
 
 	if (notification_mask)
 		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,